diff --git a/.cloudbuild.yaml b/.cloudbuild.yaml
index d8c770f4729..0f4403ddae3 100644
--- a/.cloudbuild.yaml
+++ b/.cloudbuild.yaml
@@ -88,6 +88,20 @@ steps:
'/workspace/backend/Dockerfile.visualization', '/workspace']
id: 'buildVisualizationServer'
waitFor: ["-"]
+- id: 'buildMetadataWriter'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['build', '-t', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA',
+ '--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
+ '/workspace/backend/metadata_writer/Dockerfile', '/workspace']
+ waitFor: ["-"]
+
+# Build the GCP Marketplace deployer
+- id: 'buildMarketplaceDeployer'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['build', '-t', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA',
+ '--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
+ '/workspace/manifests/gcp_marketplace/deployer/Dockerfile', '/workspace/manifests/gcp_marketplace']
+ waitFor: ["-"]
# Build the Kubeflow-based pipeline component images
- name: 'gcr.io/cloud-builders/docker'
@@ -100,6 +114,11 @@ steps:
args: ['-c', 'cd /workspace/components/kubeflow/launcher && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildLauncher'
waitFor: ["-"]
+- id: 'buildCpuTrainer'
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: '/bin/bash'
+ args: ['-c', 'cd /workspace/components/kubeflow/dnntrainer && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA -l ml-pipeline-kubeflow-tf-trainer -b 1.6.0']
+ waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/dnntrainer && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA -l ml-pipeline-kubeflow-tf-trainer-gpu -b 1.6.0-gpu']
@@ -172,12 +191,16 @@ images:
- 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA'
+- 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA'
+
+# Images for the GCP Marketplace
+- 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA'
# Images for the Kubeflow-based pipeline components
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-deployer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf-trainer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf-trainer-gpu:$COMMIT_SHA'
-- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf:$COMMIT_SHA'
+- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tfjob:$COMMIT_SHA'
# Images for the Dataproc-based pipeline components
- 'gcr.io/$PROJECT_ID/ml-pipeline-dataproc-analyze:$COMMIT_SHA'
diff --git a/.release.cloudbuild.yaml b/.release.cloudbuild.yaml
index 9a36d3487c6..1405f1dbac3 100644
--- a/.release.cloudbuild.yaml
+++ b/.release.cloudbuild.yaml
@@ -13,6 +13,17 @@
# limitations under the License.
steps:
+
+# Marketplace Major.Minor parsing
+- id: "parseMajorMinorVersion"
+ name: gcr.io/cloud-builders/docker
+ entrypoint: /bin/bash
+ args:
+ - -ceux
+ - |
+    # Parse the major.minor version and save it to a file for reuse in later steps.
+ echo $TAG_NAME | sed -e "s#[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)#\1.\2#" > /workspace/mm.ver
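+    # For example, TAG_NAME=0.2.0 writes "0.2" to mm.ver; a tag such as
+    # v1.2.3 would yield "1.2", since leading non-digits are stripped.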
+
# Pull and retag the images for the pipeline system
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA']
@@ -29,6 +40,21 @@ steps:
args: ['tag', 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/frontend:$TAG_NAME']
id: 'tagFrontendForMarketplace'
waitFor: ['pullFrontend']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/frontend:$TAG_NAME']
+ id: 'tagFrontendForMarketplaceTest'
+ waitFor: ['pullFrontend']
+- id: 'tagFrontendForMarketplaceMajorMinor'
+ waitFor: ['pullFrontend', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/frontend:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/frontend:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/frontend:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/frontend:$(cat /workspace/mm.ver)
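+# The same pattern repeats for each image below: the commit image is retagged
+# into both the release (pipelines) and test (pipelines-test) Marketplace repos
+# with the major.minor tag and pushed within the step itself, because only the
+# $TAG_NAME and $COMMIT_SHA tags appear in the `images:` list at the bottom.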
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA']
@@ -45,6 +71,21 @@ steps:
args: ['tag', 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/apiserver:$TAG_NAME']
id: 'tagAPIServerForMarketplace'
waitFor: ['pullAPIServer']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/apiserver:$TAG_NAME']
+ id: 'tagAPIServerForMarketplaceTest'
+ waitFor: ['pullAPIServer']
+- id: 'tagAPIServerForMarketplaceMajorMinor'
+ waitFor: ['pullAPIServer', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/apiserver:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/apiserver:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/apiserver:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/apiserver:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA']
@@ -61,6 +102,21 @@ steps:
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowForMarketplace'
waitFor: ['pullScheduledworkflow']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$TAG_NAME']
+ id: 'tagScheduledworkflowForMarketplaceTest'
+ waitFor: ['pullScheduledworkflow']
+- id: 'tagScheduledworkflowForMarketplaceMajorMinor'
+ waitFor: ['pullScheduledworkflow', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA']
@@ -77,6 +133,21 @@ steps:
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/viewercrd:$TAG_NAME']
id: 'tagViewerCrdControllerForMarketplace'
waitFor: ['pullViewerCrdController']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$TAG_NAME']
+ id: 'tagViewerCrdControllerForMarketplaceTest'
+ waitFor: ['pullViewerCrdController']
+- id: 'tagViewerCrdControllerForMarketplaceMajorMinor'
+ waitFor: ['pullViewerCrdController', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/viewercrd:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/viewercrd:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA']
@@ -93,6 +164,21 @@ steps:
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentForMarketplace'
waitFor: ['pullPersistenceagent']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$TAG_NAME']
+ id: 'tagPersistenceagentForMarketplaceTest'
+ waitFor: ['pullPersistenceagent']
+- id: 'tagPersistenceagentForMarketplaceMajorMinor'
+ waitFor: ['pullPersistenceagent', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/persistenceagent:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/persistenceagent:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA']
@@ -109,6 +195,21 @@ steps:
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/proxyagent:$TAG_NAME']
id: 'tagInverseProxyAgentForMarketplace'
waitFor: ['pullInverseProxyAgent']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$TAG_NAME']
+ id: 'tagInverseProxyAgentForMarketplaceTest'
+ waitFor: ['pullInverseProxyAgent']
+- id: 'tagInverseProxyAgentForMarketplaceMajorMinor'
+ waitFor: ['pullInverseProxyAgent', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/proxyagent:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/proxyagent:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA']
@@ -125,55 +226,259 @@ steps:
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/visualizationserver:$TAG_NAME']
id: 'tagVisualizationServerForMarketplace'
waitFor: ['pullVisualizationServer']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$TAG_NAME']
+ id: 'tagVisualizationServerForMarketplaceTest'
+ waitFor: ['pullVisualizationServer']
+- id: 'tagVisualizationServerForMarketplaceMajorMinor'
+ waitFor: ['pullVisualizationServer', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/visualizationserver:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/visualizationserver:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.14.0']
- id: 'PullMetadataServer'
+ id: 'pullMetadataServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.14.0', 'gcr.io/ml-pipeline/google/pipelines/metadataserver:$TAG_NAME']
- waitFor: ['PullMetadataServer']
+ id: 'tagMetadataServerForMarketplace'
+ waitFor: ['pullMetadataServer']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.14.0', 'gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$TAG_NAME']
+ id: 'tagMetadataServerForMarketplaceTest'
+ waitFor: ['pullMetadataServer']
+- id: 'tagMetadataServerForMarketplaceMajorMinor'
+ waitFor: ['pullMetadataServer', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/tfx-oss-public/ml_metadata_store_server:0.14.0 gcr.io/ml-pipeline/google/pipelines/metadataserver:$(cat /workspace/mm.ver)
+ docker tag gcr.io/tfx-oss-public/ml_metadata_store_server:0.14.0 gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/metadataserver:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$(cat /workspace/mm.ver)
+
+- id: 'pullMetadataWriter'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['pull', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA']
+ waitFor: ['-']
+- id: 'tagMetadataWriterVersionNumber'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-writer:$TAG_NAME']
+ waitFor: ['pullMetadataWriter']
+- id: 'tagMetadataWriterCommitSHA'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-writer:$COMMIT_SHA']
+ waitFor: ['pullMetadataWriter']
+- id: 'tagMetadataWriterForMarketplace'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/metadatawriter:$TAG_NAME']
+ waitFor: ['pullMetadataWriter']
+- id: 'tagMetadataWriterForMarketplaceTest'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$TAG_NAME']
+ waitFor: ['pullMetadataWriter']
+- id: 'tagMetadataWriterForMarketplaceMajorMinor'
+ waitFor: ['pullMetadataWriter', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/metadatawriter:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/metadatawriter:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA']
- id: 'PullMetadataEnvoy'
+ id: 'pullMetadataEnvoy'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$TAG_NAME']
- waitFor: ['PullMetadataEnvoy']
+ id: 'tagMetadataEnvoyForMarketplace'
+ waitFor: ['pullMetadataEnvoy']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$TAG_NAME']
+ id: 'tagMetadataEnvoyForMarketplaceTest'
+ waitFor: ['pullMetadataEnvoy']
+- id: 'tagMetadataEnvoyForMarketplaceMajorMinor'
+ waitFor: ['pullMetadataEnvoy', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance']
- id: 'PullMinio'
+ id: 'pullMinio'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/minio:$TAG_NAME']
- waitFor: ['PullMinio']
+ id: 'tagMinioForMarketplace'
+ waitFor: ['pullMinio']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/minio:$TAG_NAME']
+ id: 'tagMinioForMarketplaceTest'
+ waitFor: ['pullMinio']
+- id: 'tagMinioForMarketplaceMajorMinor'
+ waitFor: ['pullMinio', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance gcr.io/ml-pipeline/google/pipelines/minio:$(cat /workspace/mm.ver)
+ docker tag gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance gcr.io/ml-pipeline/google/pipelines-test/minio:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/minio:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/minio:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/mysql:5.6']
- id: 'PullMysql'
+ id: 'pullMysql'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/mysql:5.6', 'gcr.io/ml-pipeline/google/pipelines/mysql:$TAG_NAME']
- waitFor: ['PullMysql']
+ id: 'tagMySqlForMarketplace'
+ waitFor: ['pullMysql']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/ml-pipeline/mysql:5.6', 'gcr.io/ml-pipeline/google/pipelines-test/mysql:$TAG_NAME']
+ id: 'tagMySqlForMarketplaceTest'
+ waitFor: ['pullMysql']
+- id: 'tagMySqlForMarketplaceMajorMinor'
+ waitFor: ['pullMysql', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/ml-pipeline/mysql:5.6 gcr.io/ml-pipeline/google/pipelines/mysql:$(cat /workspace/mm.ver)
+ docker tag gcr.io/ml-pipeline/mysql:5.6 gcr.io/ml-pipeline/google/pipelines-test/mysql:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/mysql:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/mysql:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/cloudsql-docker/gce-proxy:1.14']
- id: 'PullCloudsqlProxy'
+ id: 'pullCloudsqlProxy'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/cloudsql-docker/gce-proxy:1.14', 'gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$TAG_NAME']
- waitFor: ['PullCloudsqlProxy']
+ id: 'tagCloudSqlProxyForMarketplace'
+ waitFor: ['pullCloudsqlProxy']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/cloudsql-docker/gce-proxy:1.14', 'gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$TAG_NAME']
+ id: 'tagCloudSqlProxyForMarketplaceTest'
+ waitFor: ['pullCloudsqlProxy']
+- id: 'tagCloudSqlProxyForMarketplaceMajorMinor'
+ waitFor: ['pullCloudsqlProxy', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/cloudsql-docker/gce-proxy:1.14 gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$(cat /workspace/mm.ver)
+ docker tag gcr.io/cloudsql-docker/gce-proxy:1.14 gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/argoexec:v2.3.0-license-compliance']
- id: 'PullArgoExecutor'
+ id: 'pullArgoExecutor'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/argoexec:v2.3.0-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/argoexecutor:$TAG_NAME']
- waitFor: ['PullArgoExecutor']
+ id: 'tagArgoExecutorForMarketplace'
+ waitFor: ['pullArgoExecutor']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/ml-pipeline/argoexec:v2.3.0-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$TAG_NAME']
+ id: 'tagArgoExecutorForMarketplaceTest'
+ waitFor: ['pullArgoExecutor']
+- id: 'tagArgoExecutorForMarketplaceMajorMinor'
+ waitFor: ['pullArgoExecutor', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/ml-pipeline/argoexec:v2.3.0-license-compliance gcr.io/ml-pipeline/google/pipelines/argoexecutor:$(cat /workspace/mm.ver)
+ docker tag gcr.io/ml-pipeline/argoexec:v2.3.0-license-compliance gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/argoexecutor:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/workflow-controller:v2.3.0-license-compliance']
- id: 'PullArgoWorkflowController'
+ id: 'pullArgoWorkflowController'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/workflow-controller:v2.3.0-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$TAG_NAME']
- waitFor: ['PullArgoWorkflowController']
+ id: 'tagArgoWorkflowControllerForMarketplace'
+ waitFor: ['pullArgoWorkflowController']
+- name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/ml-pipeline/workflow-controller:v2.3.0-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$TAG_NAME']
+ id: 'tagArgoWorkflowControllerForMarketplaceTest'
+ waitFor: ['pullArgoWorkflowController']
+- id: 'tagArgoWorkflowControllerForMarketplaceMajorMinor'
+ waitFor: ['pullArgoWorkflowController', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/ml-pipeline/workflow-controller:v2.3.0-license-compliance gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$(cat /workspace/mm.ver)
+ docker tag gcr.io/ml-pipeline/workflow-controller:v2.3.0-license-compliance gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$(cat /workspace/mm.ver)
+
+# Marketplace-specific deployer and primary image
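+# (As we understand the GCP Marketplace convention, the solution's primary image
+# path and its companion /deployer image carry matching release tags, so the
+# deployer built in .cloudbuild.yaml is retagged to both paths below.)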
+- id: 'pullMarketplaceDeployer'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['pull', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA']
+ waitFor: ['-']
+- id: 'tagMarketplaceDeployerVersionNumber'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/deployer:$TAG_NAME']
+ waitFor: ['pullMarketplaceDeployer']
+- id: 'tagMarketplaceDeployerVersionNumberTest'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/deployer:$TAG_NAME']
+ waitFor: ['pullMarketplaceDeployer']
+- id: 'tagMarketplaceDeployerVersionNumberMajorMinor'
+ waitFor: ['pullMarketplaceDeployer', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/deployer:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/deployer:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines/deployer:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test/deployer:$(cat /workspace/mm.ver)
+
+- id: 'tagMarketplacePrimaryVersionNumber'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines:$TAG_NAME']
+ waitFor: ['pullMarketplaceDeployer']
+- id: 'tagMarketplacePrimaryVersionNumberTest'
+ name: 'gcr.io/cloud-builders/docker'
+ args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test:$TAG_NAME']
+ waitFor: ['pullMarketplaceDeployer']
+- id: 'tagMarketplacePrimaryVersionNumberMajorMinor'
+ waitFor: ['pullMarketplaceDeployer', 'parseMajorMinorVersion']
+ name: 'gcr.io/cloud-builders/docker'
+ entrypoint: bash
+ args:
+ - -ceux
+ - |
+ docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines:$(cat /workspace/mm.ver)
+ docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines:$(cat /workspace/mm.ver)
+ docker push gcr.io/ml-pipeline/google/pipelines-test:$(cat /workspace/mm.ver)
# Copy the Python SDK
- name: 'gcr.io/cloud-builders/gsutil'
@@ -201,6 +506,15 @@ steps:
id: 'copyPythonComponentSDKToLatest'
waitFor: ['copyPythonComponentSDKLocal']
+# Generate and copy the pipeline-lite CRD deployment YAML
+- name: 'google/cloud-sdk'
+ args: ['bash', '-c', 'kubectl kustomize /workspace/manifests/kustomize/base/crds > crd.yaml']
+ id: 'generateCrdDeploymentYaml'
+- name: 'gcr.io/cloud-builders/gsutil'
+ args: ['cp', 'crd.yaml', 'gs://ml-pipeline/pipeline-lite/$TAG_NAME/crd.yaml']
+ id: 'copyCrdDeploymentYaml'
+ waitFor: ['generateCrdDeploymentYaml']
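+# Once uploaded, the manifest can be applied straight from GCS (assuming the
+# ml-pipeline bucket stays publicly readable), e.g.:
+#   kubectl apply -f https://storage.googleapis.com/ml-pipeline/pipeline-lite/<version>/crd.yaml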
+
# Generate and copy the pipeline-lite deployment YAML
- name: 'google/cloud-sdk'
args: ['bash', '-c', 'kubectl kustomize /workspace/manifests/kustomize/env/dev > namespaced-install.yaml']
@@ -226,6 +540,8 @@ images:
- 'gcr.io/ml-pipeline/inverse-proxy-agent:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/visualization-server:$TAG_NAME'
- 'gcr.io/ml-pipeline/visualization-server:$COMMIT_SHA'
+- 'gcr.io/ml-pipeline/metadata-writer:$TAG_NAME'
+- 'gcr.io/ml-pipeline/metadata-writer:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/google/pipelines/frontend:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/apiserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$TAG_NAME'
@@ -240,6 +556,25 @@ images:
- 'gcr.io/ml-pipeline/google/pipelines/argoexecutor:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$TAG_NAME'
-
+- 'gcr.io/ml-pipeline/google/pipelines/metadatawriter:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines/deployer:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/frontend:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/apiserver:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/minio:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/mysql:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test/deployer:$TAG_NAME'
+- 'gcr.io/ml-pipeline/google/pipelines-test:$TAG_NAME'
timeout: '1200s'
diff --git a/.travis.yml b/.travis.yml
index 8bd319ec43c..9ed883ac2a4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -57,7 +57,7 @@ matrix:
install: &0
- python3 -m pip install -r $TRAVIS_BUILD_DIR/sdk/python/requirements.txt
# Additional dependencies
- - pip3 install coverage coveralls
+ - pip3 install coverage==4.5.4 coveralls==1.9.2
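+      # Pin to known-good versions so CI does not pick up incompatible newer releases.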
# Sample test infra dependencies
- pip3 install minio
- pip3 install junit_xml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index eb0744bb6e7..9c310e95c83 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,160 @@
# Changelog
+## [0.2.0](https://github.com/kubeflow/pipelines/tree/0.2.0) (2020-01-20)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.40...0.2.0)
+
+**Merged pull requests:**
+
+- fix the metadata writer manifest [\#2878](https://github.com/kubeflow/pipelines/pull/2878) ([rmgogogo](https://github.com/rmgogogo))
+- Coverage reporting for frontend server [\#2877](https://github.com/kubeflow/pipelines/pull/2877) ([Bobgy](https://github.com/Bobgy))
+- enlarge timeout to 45m to avoid flakyness [\#2875](https://github.com/kubeflow/pipelines/pull/2875) ([rmgogogo](https://github.com/rmgogogo))
+- Better Hosted Release - automate things as possible as we can [\#2874](https://github.com/kubeflow/pipelines/pull/2874) ([rmgogogo](https://github.com/rmgogogo))
+- Build deployer for each post-submit to avoid manual work [\#2873](https://github.com/kubeflow/pipelines/pull/2873) ([rmgogogo](https://github.com/rmgogogo))
+- from 0.1.40 to 0.2.0 [\#2872](https://github.com/kubeflow/pipelines/pull/2872) ([rmgogogo](https://github.com/rmgogogo))
+- enable full scope for testing version [\#2871](https://github.com/kubeflow/pipelines/pull/2871) ([rmgogogo](https://github.com/rmgogogo))
+- Samples - Updated the TFX-KFP pipeline [\#2867](https://github.com/kubeflow/pipelines/pull/2867) ([Ark-kun](https://github.com/Ark-kun))
+- Frontend - Metadata - Use custom properties in addition to plain properties [\#2854](https://github.com/kubeflow/pipelines/pull/2854) ([Ark-kun](https://github.com/Ark-kun))
+- fix panic in Viewer cleanup [\#2853](https://github.com/kubeflow/pipelines/pull/2853) ([mattnworb](https://github.com/mattnworb))
+- \[Proxy\] Split domain name [\#2851](https://github.com/kubeflow/pipelines/pull/2851) ([numerology](https://github.com/numerology))
+- \[Test\] Add KFP MKP deployment for e2e tests [\#2850](https://github.com/kubeflow/pipelines/pull/2850) ([rui5i](https://github.com/rui5i))
+- Opt out proxy agent configmap [\#2849](https://github.com/kubeflow/pipelines/pull/2849) ([IronPan](https://github.com/IronPan))
+- \[Doc\] Update link of preload TFX sample [\#2839](https://github.com/kubeflow/pipelines/pull/2839) ([numerology](https://github.com/numerology))
+- Fix wrong example with container\_op [\#2838](https://github.com/kubeflow/pipelines/pull/2838) ([kim-sardine](https://github.com/kim-sardine))
+- \[UI\] Simplify start server script and fix local cluster name endpoint [\#2836](https://github.com/kubeflow/pipelines/pull/2836) ([Bobgy](https://github.com/Bobgy))
+- Fix a typo in Tensorboard.tsx. Add a default TB version. [\#2832](https://github.com/kubeflow/pipelines/pull/2832) ([jingzhang36](https://github.com/jingzhang36))
+- \[UI\] Update node to 12 [\#2830](https://github.com/kubeflow/pipelines/pull/2830) ([Bobgy](https://github.com/Bobgy))
+- visualization server wants kubernetes serivce account too [\#2822](https://github.com/kubeflow/pipelines/pull/2822) ([jingzhang36](https://github.com/jingzhang36))
+- \[testing\] Fix e2e tests timed out, but there's empty test log [\#2810](https://github.com/kubeflow/pipelines/pull/2810) ([Bobgy](https://github.com/Bobgy))
+- \[Sample\] Update the Doc for TFX sample [\#2798](https://github.com/kubeflow/pipelines/pull/2798) ([numerology](https://github.com/numerology))
+- \[UI\] Add editor for json parameters in new run page [\#2747](https://github.com/kubeflow/pipelines/pull/2747) ([drewbutlerbb4](https://github.com/drewbutlerbb4))
+- Bump tensorflow from 1.12.1 to 1.15.0 in /components/kubeflow/dnntrainer/src [\#2743](https://github.com/kubeflow/pipelines/pull/2743) ([dependabot[bot]](https://github.com/apps/dependabot))
+- Update deployer base to 0.1.40 [\#2696](https://github.com/kubeflow/pipelines/pull/2696) ([rmgogogo](https://github.com/rmgogogo))
+- Service - Metadata writer [\#2674](https://github.com/kubeflow/pipelines/pull/2674) ([Ark-kun](https://github.com/Ark-kun))
+- Install application CRD and add pipeline application CR to pipeline standalone [\#2585](https://github.com/kubeflow/pipelines/pull/2585) ([IronPan](https://github.com/IronPan))
+- better handle inverse-proxy configmap for better GC [\#2391](https://github.com/kubeflow/pipelines/pull/2391) ([rmgogogo](https://github.com/rmgogogo))
+
+## [0.1.40](https://github.com/kubeflow/pipelines/tree/0.1.40) (2020-01-09)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.39...0.1.40)
+
+**Merged pull requests:**
+
+- \[Backend\] Fix report workflow error messages [\#2815](https://github.com/kubeflow/pipelines/pull/2815) ([Bobgy](https://github.com/Bobgy))
+- Pin pyarrow version [\#2813](https://github.com/kubeflow/pipelines/pull/2813) ([numerology](https://github.com/numerology))
+- \[UI\] fix artifact handler query parameter key [\#2809](https://github.com/kubeflow/pipelines/pull/2809) ([Bobgy](https://github.com/Bobgy))
+- Fix node server typing problems [\#2807](https://github.com/kubeflow/pipelines/pull/2807) ([Bobgy](https://github.com/Bobgy))
+- add jiaxiao to the component owners [\#2804](https://github.com/kubeflow/pipelines/pull/2804) ([gaoning777](https://github.com/gaoning777))
+- \[Component\] Fix the component arguments [\#2803](https://github.com/kubeflow/pipelines/pull/2803) ([numerology](https://github.com/numerology))
+- update changelog [\#2801](https://github.com/kubeflow/pipelines/pull/2801) ([gaoning777](https://github.com/gaoning777))
+- \[Manifest/Doc\] Another version bumping [\#2797](https://github.com/kubeflow/pipelines/pull/2797) ([numerology](https://github.com/numerology))
+- Making json output format easier to read [\#2792](https://github.com/kubeflow/pipelines/pull/2792) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- \[Doc\] Change sample/component/sdk documentation to not use `use\_gcp\_secret` [\#2782](https://github.com/kubeflow/pipelines/pull/2782) ([Bobgy](https://github.com/Bobgy))
+- SDK - Bumping the upper version of Kubernetes package [\#2780](https://github.com/kubeflow/pipelines/pull/2780) ([Ark-kun](https://github.com/Ark-kun))
+- Delete namespaced-install.yaml [\#2603](https://github.com/kubeflow/pipelines/pull/2603) ([IronPan](https://github.com/IronPan))
+
+## [0.1.39](https://github.com/kubeflow/pipelines/tree/0.1.39) (2020-01-06)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.38...0.1.39)
+
+**Merged pull requests:**
+
+- release 0.1.39 [\#2799](https://github.com/kubeflow/pipelines/pull/2799) ([gaoning777](https://github.com/gaoning777))
+- \[Testing\] Reduce distracting command log [\#2796](https://github.com/kubeflow/pipelines/pull/2796) ([Bobgy](https://github.com/Bobgy))
+- SKL - DSL - Stabilized the PipelineVolume names [\#2794](https://github.com/kubeflow/pipelines/pull/2794) ([Ark-kun](https://github.com/Ark-kun))
+- \[Testing\] Save pod desc text and stackdriver link as artifacts [\#2791](https://github.com/kubeflow/pipelines/pull/2791) ([Bobgy](https://github.com/Bobgy))
+- \[UI\] Show corresponding GKE cluster name [\#2789](https://github.com/kubeflow/pipelines/pull/2789) ([Bobgy](https://github.com/Bobgy))
+- \[Clean-up\] Remove accidentally merged yaml file [\#2788](https://github.com/kubeflow/pipelines/pull/2788) ([numerology](https://github.com/numerology))
+- \[Sample tests\] Upgrade GCP sdk to 272 in sample test [\#2786](https://github.com/kubeflow/pipelines/pull/2786) ([numerology](https://github.com/numerology))
+- \[Manifest\] Bump standalone deployment version in doc [\#2785](https://github.com/kubeflow/pipelines/pull/2785) ([numerology](https://github.com/numerology))
+- \[Components\] Roll forward of \#2697 for real. [\#2779](https://github.com/kubeflow/pipelines/pull/2779) ([numerology](https://github.com/numerology))
+- \[Manifest\] fix metadata configmap [\#2778](https://github.com/kubeflow/pipelines/pull/2778) ([numerology](https://github.com/numerology))
+- Use new K8s version to improve workload identity stability [\#2777](https://github.com/kubeflow/pipelines/pull/2777) ([Bobgy](https://github.com/Bobgy))
+- Fix vscode README of prettier integration [\#2776](https://github.com/kubeflow/pipelines/pull/2776) ([Bobgy](https://github.com/Bobgy))
+- \[Sample\] Clean up accidentally committed file [\#2775](https://github.com/kubeflow/pipelines/pull/2775) ([Bobgy](https://github.com/Bobgy))
+- \[UI\] Fix UI crash when invalid pipeline uploaded [\#2774](https://github.com/kubeflow/pipelines/pull/2774) ([Bobgy](https://github.com/Bobgy))
+- \[Standalone\] Fix GCP cloudsql manifest [\#2772](https://github.com/kubeflow/pipelines/pull/2772) ([Bobgy](https://github.com/Bobgy))
+- Update version in generate\_api.sh [\#2770](https://github.com/kubeflow/pipelines/pull/2770) ([jingzhang36](https://github.com/jingzhang36))
+- \[UI\] Succeed snackbar shouldn't show when there is an error [\#2766](https://github.com/kubeflow/pipelines/pull/2766) ([Bobgy](https://github.com/Bobgy))
+- Adjust the namespace for Delete/Terminate/Retry run APIs [\#2765](https://github.com/kubeflow/pipelines/pull/2765) ([gaoning777](https://github.com/gaoning777))
+- update release changelog [\#2763](https://github.com/kubeflow/pipelines/pull/2763) ([gaoning777](https://github.com/gaoning777))
+- \[Component\] Rollforward of PR\#2697 [\#2760](https://github.com/kubeflow/pipelines/pull/2760) ([numerology](https://github.com/numerology))
+- Add min and max lines for displaying json editor [\#2752](https://github.com/kubeflow/pipelines/pull/2752) ([fenglixa](https://github.com/fenglixa))
+- \[Frontend\] unit tests for node server [\#2745](https://github.com/kubeflow/pipelines/pull/2745) ([eterna2](https://github.com/eterna2))
+- Add step by step tutorial using mnist as use case [\#2716](https://github.com/kubeflow/pipelines/pull/2716) ([luotigerlsx](https://github.com/luotigerlsx))
+- \[SDK/compiler\] Sanitize op name for PipelineParam [\#2711](https://github.com/kubeflow/pipelines/pull/2711) ([numerology](https://github.com/numerology))
+- Support choosing tensorboard version from UI [\#2690](https://github.com/kubeflow/pipelines/pull/2690) ([dldaisy](https://github.com/dldaisy))
+- SDK - Unified the function signature parsing implementations [\#2689](https://github.com/kubeflow/pipelines/pull/2689) ([Ark-kun](https://github.com/Ark-kun))
+- support extra parameters for mysql connection [\#2668](https://github.com/kubeflow/pipelines/pull/2668) ([xaniasd](https://github.com/xaniasd))
+- End to end mnist pipeline use case [\#2628](https://github.com/kubeflow/pipelines/pull/2628) ([hougangliu](https://github.com/hougangliu))
+
+## [0.1.38](https://github.com/kubeflow/pipelines/tree/0.1.38) (2019-12-19)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.37...0.1.38)
+
+**Merged pull requests:**
+
+- update manifest [\#2762](https://github.com/kubeflow/pipelines/pull/2762) ([gaoning777](https://github.com/gaoning777))
+- Release a8fcec5f702fc2528c87ed6fd698b9cfca8b509e [\#2761](https://github.com/kubeflow/pipelines/pull/2761) ([gaoning777](https://github.com/gaoning777))
+- Revert "Components - De-hardcoded the UI metadata file path in GCP co… [\#2759](https://github.com/kubeflow/pipelines/pull/2759) ([numerology](https://github.com/numerology))
+- Added alpha notice to READMEs for KFP Marketplace [\#2749](https://github.com/kubeflow/pipelines/pull/2749) ([sarahmaddox](https://github.com/sarahmaddox))
+- Move go license tools [\#2748](https://github.com/kubeflow/pipelines/pull/2748) ([Bobgy](https://github.com/Bobgy))
+- \[Test\] Pin the versions of coverage and coveralls [\#2740](https://github.com/kubeflow/pipelines/pull/2740) ([numerology](https://github.com/numerology))
+- SDK/DSL: Fix PipelineVolume name length [\#2739](https://github.com/kubeflow/pipelines/pull/2739) ([elikatsis](https://github.com/elikatsis))
+- Fix obsolete image cache when the same PR commit is tested with a new master [\#2738](https://github.com/kubeflow/pipelines/pull/2738) ([Bobgy](https://github.com/Bobgy))
+- Build - Fix building TF images [\#2736](https://github.com/kubeflow/pipelines/pull/2736) ([Ark-kun](https://github.com/Ark-kun))
+- Authorize other run api [\#2735](https://github.com/kubeflow/pipelines/pull/2735) ([gaoning777](https://github.com/gaoning777))
+- SDK - Bump version to 0.1.38 [\#2734](https://github.com/kubeflow/pipelines/pull/2734) ([Ark-kun](https://github.com/Ark-kun))
+- temporarily disable the jobservice in the multi-user mode [\#2733](https://github.com/kubeflow/pipelines/pull/2733) ([gaoning777](https://github.com/gaoning777))
+- Suggest run name based on the pipeline version used to create run [\#2731](https://github.com/kubeflow/pipelines/pull/2731) ([jingzhang36](https://github.com/jingzhang36))
+- \[Sample\] Add a notebook sample under parameterized\_tfx\_oss [\#2729](https://github.com/kubeflow/pipelines/pull/2729) ([numerology](https://github.com/numerology))
+- \[Doc\] Fix link validation complaint. [\#2727](https://github.com/kubeflow/pipelines/pull/2727) ([numerology](https://github.com/numerology))
+- \[UI\] Fix npm vulnerabilities [\#2724](https://github.com/kubeflow/pipelines/pull/2724) ([Bobgy](https://github.com/Bobgy))
+- Metadata: Adding metadata-grpc config map [\#2723](https://github.com/kubeflow/pipelines/pull/2723) ([dushyanthsc](https://github.com/dushyanthsc))
+- \[Doc\] Fix links in preload sample [\#2722](https://github.com/kubeflow/pipelines/pull/2722) ([numerology](https://github.com/numerology))
+- Format other frontend code using prettier [\#2717](https://github.com/kubeflow/pipelines/pull/2717) ([Bobgy](https://github.com/Bobgy))
+- update kfam service host env variable [\#2715](https://github.com/kubeflow/pipelines/pull/2715) ([gaoning777](https://github.com/gaoning777))
+- Fix combined unique keys [\#2712](https://github.com/kubeflow/pipelines/pull/2712) ([jingzhang36](https://github.com/jingzhang36))
+- Add samples to manage Azure Databricks in Kubeflow Pipelines [\#2709](https://github.com/kubeflow/pipelines/pull/2709) ([magencio](https://github.com/magencio))
+- Improve golang license CLI tools README [\#2707](https://github.com/kubeflow/pipelines/pull/2707) ([Bobgy](https://github.com/Bobgy))
+- Improve test cloudbuild yaml readability [\#2703](https://github.com/kubeflow/pipelines/pull/2703) ([Bobgy](https://github.com/Bobgy))
+- Components - De-hardcoded the UI metadata file path in GCP components [\#2697](https://github.com/kubeflow/pipelines/pull/2697) ([Ark-kun](https://github.com/Ark-kun))
+- \[Sample test\] Add parameterized\_tfx\_oss to 'normal' sample test corpus [\#2695](https://github.com/kubeflow/pipelines/pull/2695) ([numerology](https://github.com/numerology))
+- Separate run resources in namespaces [\#2694](https://github.com/kubeflow/pipelines/pull/2694) ([gaoning777](https://github.com/gaoning777))
+- SDK - Fixed the capitalization in \_python\_function\_name\_to\_component\_name [\#2688](https://github.com/kubeflow/pipelines/pull/2688) ([Ark-kun](https://github.com/Ark-kun))
+- 2682: Support a custom kube config location in K8sJobHelper [\#2683](https://github.com/kubeflow/pipelines/pull/2683) ([pahask8](https://github.com/pahask8))
+- TFjob v1 launcher [\#2677](https://github.com/kubeflow/pipelines/pull/2677) ([hougangliu](https://github.com/hougangliu))
+- \[UI\] Pass namespace to APIs [\#2676](https://github.com/kubeflow/pipelines/pull/2676) ([Bobgy](https://github.com/Bobgy))
+- Add pipeline version support to frontend [\#2667](https://github.com/kubeflow/pipelines/pull/2667) ([jingzhang36](https://github.com/jingzhang36))
+- Add ns in sdk [\#2665](https://github.com/kubeflow/pipelines/pull/2665) ([gaoning777](https://github.com/gaoning777))
+- Authorize create run requests [\#2663](https://github.com/kubeflow/pipelines/pull/2663) ([gaoning777](https://github.com/gaoning777))
+- \[manifest\] Added manifest for deploying on aws using s3 [\#2633](https://github.com/kubeflow/pipelines/pull/2633) ([eterna2](https://github.com/eterna2))
+- Migrate standalone deployment to workload identity on GCP [\#2619](https://github.com/kubeflow/pipelines/pull/2619) ([Bobgy](https://github.com/Bobgy))
+- SDK/Compiler - Preventing pipeline entrypoint template name from clashing with other template names [\#1555](https://github.com/kubeflow/pipelines/pull/1555) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.37](https://github.com/kubeflow/pipelines/tree/0.1.37) (2019-12-05)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.36...0.1.37)
+
+**Merged pull requests:**
+
+- update server and sdk version to 0.1.37 [\#2701](https://github.com/kubeflow/pipelines/pull/2701) ([hongye-sun](https://github.com/hongye-sun))
+- release-b63472062bd80737c7f39e0eda901db0fe23a5e0 [\#2700](https://github.com/kubeflow/pipelines/pull/2700) ([hongye-sun](https://github.com/hongye-sun))
+- Store ns in db [\#2698](https://github.com/kubeflow/pipelines/pull/2698) ([gaoning777](https://github.com/gaoning777))
+- fix visualization-server doc link [\#2681](https://github.com/kubeflow/pipelines/pull/2681) ([rmgogogo](https://github.com/rmgogogo))
+- Add readable step id for test cloudbuild steps [\#2673](https://github.com/kubeflow/pipelines/pull/2673) ([Bobgy](https://github.com/Bobgy))
+- Configure jest to hide noisy message about no coverage data on .d.ts files [\#2672](https://github.com/kubeflow/pipelines/pull/2672) ([Bobgy](https://github.com/Bobgy))
+- Components - TFX [\#2671](https://github.com/kubeflow/pipelines/pull/2671) ([Ark-kun](https://github.com/Ark-kun))
+- \[Release\] update changelog [\#2666](https://github.com/kubeflow/pipelines/pull/2666) ([numerology](https://github.com/numerology))
+- Components - Filesystem [\#2659](https://github.com/kubeflow/pipelines/pull/2659) ([Ark-kun](https://github.com/Ark-kun))
+- Components - Git clone [\#2658](https://github.com/kubeflow/pipelines/pull/2658) ([Ark-kun](https://github.com/Ark-kun))
+- \[UI\] Get kubeflow namespace from kfp UI [\#2655](https://github.com/kubeflow/pipelines/pull/2655) ([Bobgy](https://github.com/Bobgy))
+- add namespace to the resourcereference [\#2651](https://github.com/kubeflow/pipelines/pull/2651) ([gaoning777](https://github.com/gaoning777))
+- Remove travis CI frontend tests [\#2647](https://github.com/kubeflow/pipelines/pull/2647) ([Bobgy](https://github.com/Bobgy))
+- Frontend - Add support for Minio artifact URIs [\#2645](https://github.com/kubeflow/pipelines/pull/2645) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Python support for arbitrary secret, similar to ".use\_gcp\_secret\('user-gcp-sa'\)" [\#2639](https://github.com/kubeflow/pipelines/pull/2639) ([NikeNano](https://github.com/NikeNano))
+- Open version api in BE proto and auto-generate BE/FE api methods [\#2620](https://github.com/kubeflow/pipelines/pull/2620) ([jingzhang36](https://github.com/jingzhang36))
+
## [0.1.36](https://github.com/kubeflow/pipelines/tree/0.1.36) (2019-11-26)
[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.35...0.1.36)
@@ -32,6 +187,2007 @@
- Katib experiment launcher [\#2577](https://github.com/kubeflow/pipelines/pull/2577) ([hougangliu](https://github.com/hougangliu))
- Add a new field "TensorflowImage" to KFP viewer CRD file template. [\#2544](https://github.com/kubeflow/pipelines/pull/2544) ([jingzhang36](https://github.com/jingzhang36))
+## [0.1.35](https://github.com/kubeflow/pipelines/tree/0.1.35) (2019-11-13)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.34...0.1.35)
+
+**Merged pull requests:**
+
+- Release 9670cc1aadfbbed9c52b84ea859ea97aa81213ad [\#2599](https://github.com/kubeflow/pipelines/pull/2599) ([Bobgy](https://github.com/Bobgy))
+- Bump version to 0.1.35 [\#2598](https://github.com/kubeflow/pipelines/pull/2598) ([Bobgy](https://github.com/Bobgy))
+- \[Sample\] Lint and clean up parameterized TFX sample [\#2594](https://github.com/kubeflow/pipelines/pull/2594) ([numerology](https://github.com/numerology))
+- Fix transformer sample message [\#2592](https://github.com/kubeflow/pipelines/pull/2592) ([hougangliu](https://github.com/hougangliu))
+- \[UI\] Simplify firefox fix in pull/2580 [\#2587](https://github.com/kubeflow/pipelines/pull/2587) ([Bobgy](https://github.com/Bobgy))
+- 0.1.34 changelog [\#2586](https://github.com/kubeflow/pipelines/pull/2586) ([jingzhang36](https://github.com/jingzhang36))
+- Fix missing run and pipeline id when buttons are clicked before content load [\#2584](https://github.com/kubeflow/pipelines/pull/2584) ([drewbutlerbb4](https://github.com/drewbutlerbb4))
+- Add a minimum sample to print hello world [\#2583](https://github.com/kubeflow/pipelines/pull/2583) ([IronPan](https://github.com/IronPan))
+- \[UI\] Fix firefox compatibility [\#2580](https://github.com/kubeflow/pipelines/pull/2580) ([Bobgy](https://github.com/Bobgy))
+- \[Doc\] Minor fix to MKP secret setup guide [\#2576](https://github.com/kubeflow/pipelines/pull/2576) ([numerology](https://github.com/numerology))
+- \[SDK\] Fix withItem loop [\#2572](https://github.com/kubeflow/pipelines/pull/2572) ([numerology](https://github.com/numerology))
+- fix slow query when get runs [\#2559](https://github.com/kubeflow/pipelines/pull/2559) ([wstian](https://github.com/wstian))
+- MDez patch 1 [\#2550](https://github.com/kubeflow/pipelines/pull/2550) ([OfficePop](https://github.com/OfficePop))
+- Components - Google Cloud Storage [\#2532](https://github.com/kubeflow/pipelines/pull/2532) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Fixed YAML formatting for some components [\#2529](https://github.com/kubeflow/pipelines/pull/2529) ([Ark-kun](https://github.com/Ark-kun))
+- Add arm64 support for ml-pipeline [\#2507](https://github.com/kubeflow/pipelines/pull/2507) ([MrXinWang](https://github.com/MrXinWang))
+- SDK - Containers - Renamed constructor parameter in the private ContainerBuilder class [\#2261](https://github.com/kubeflow/pipelines/pull/2261) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Containers - Getting rid of \_get\_default\_image\_builder [\#2255](https://github.com/kubeflow/pipelines/pull/2255) ([Ark-kun](https://github.com/Ark-kun))
+- \[pipeline-ui\] Retrieve pod logs from argo archive [\#2081](https://github.com/kubeflow/pipelines/pull/2081) ([eterna2](https://github.com/eterna2))
+- SDK/Components - Added Json Schema spec for the component format [\#669](https://github.com/kubeflow/pipelines/pull/669) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.34](https://github.com/kubeflow/pipelines/tree/0.1.34) (2019-11-07)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.33...0.1.34)
+
+**Merged pull requests:**
+
+- Fix inverse-proxy test cloudbuild yaml [\#2568](https://github.com/kubeflow/pipelines/pull/2568) ([Bobgy](https://github.com/Bobgy))
+- Increase kfp version in kustomize and sdk init [\#2567](https://github.com/kubeflow/pipelines/pull/2567) ([jingzhang36](https://github.com/jingzhang36))
+- Fix inverse proxy agent in e2e tests [\#2566](https://github.com/kubeflow/pipelines/pull/2566) ([Bobgy](https://github.com/Bobgy))
+- Increase kfp version in component sdk [\#2565](https://github.com/kubeflow/pipelines/pull/2565) ([jingzhang36](https://github.com/jingzhang36))
+- Fixes test flakiness caused by "unsupported operand type\(s\) for -=: 'Retry' and 'int'" [\#2563](https://github.com/kubeflow/pipelines/pull/2563) ([Bobgy](https://github.com/Bobgy))
+- Fix postsubmit test which times out [\#2557](https://github.com/kubeflow/pipelines/pull/2557) ([Bobgy](https://github.com/Bobgy))
+- \[Testing\] Describe unhealthy pods when tests clean up. [\#2554](https://github.com/kubeflow/pipelines/pull/2554) ([Bobgy](https://github.com/Bobgy))
+- Release d6d9d8da19f7110fff3a5ba713710402edaeee65 [\#2553](https://github.com/kubeflow/pipelines/pull/2553) ([jingzhang36](https://github.com/jingzhang36))
+- Fix more notebook license issues [\#2552](https://github.com/kubeflow/pipelines/pull/2552) ([Bobgy](https://github.com/Bobgy))
+- Fix wrong kfserving url [\#2551](https://github.com/kubeflow/pipelines/pull/2551) ([hougangliu](https://github.com/hougangliu))
+- \[Misc\] Add releasing note [\#2549](https://github.com/kubeflow/pipelines/pull/2549) ([numerology](https://github.com/numerology))
+- \[Frontend\] Fix run id not populated in NewRun page when clicked too fast bug [\#2547](https://github.com/kubeflow/pipelines/pull/2547) ([Bobgy](https://github.com/Bobgy))
+- Fix broken license link for jupyter notebook [\#2546](https://github.com/kubeflow/pipelines/pull/2546) ([Bobgy](https://github.com/Bobgy))
+- Adding diagnose\_me to CLI tool [\#2543](https://github.com/kubeflow/pipelines/pull/2543) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- Update CHANGELOG.md [\#2541](https://github.com/kubeflow/pipelines/pull/2541) ([IronPan](https://github.com/IronPan))
+- Frontend format check should be before linting. [\#2525](https://github.com/kubeflow/pipelines/pull/2525) ([Bobgy](https://github.com/Bobgy))
+- SDK - Client - Fixed client on Windows [\#2524](https://github.com/kubeflow/pipelines/pull/2524) ([Ark-kun](https://github.com/Ark-kun))
+- Set DB GroupConcatMaxLen config to default of 4MB [\#2497](https://github.com/kubeflow/pipelines/pull/2497) ([krajasek](https://github.com/krajasek))
+- SDK - Client - Added API models to the generated API [\#2418](https://github.com/kubeflow/pipelines/pull/2418) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - DSL - Deprecated ArtifactLocation [\#2326](https://github.com/kubeflow/pipelines/pull/2326) ([Ark-kun](https://github.com/Ark-kun))
+- Add external links that make some content easier to discover [\#2068](https://github.com/kubeflow/pipelines/pull/2068) ([Bobgy](https://github.com/Bobgy))
+
+## [0.1.33](https://github.com/kubeflow/pipelines/tree/0.1.33) (2019-11-02)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.32...0.1.33)
+
+**Merged pull requests:**
+
+- Update kustomization.yaml [\#2530](https://github.com/kubeflow/pipelines/pull/2530) ([IronPan](https://github.com/IronPan))
+- Update setup.py [\#2528](https://github.com/kubeflow/pipelines/pull/2528) ([IronPan](https://github.com/IronPan))
+- Update \_\_init\_\_.py [\#2527](https://github.com/kubeflow/pipelines/pull/2527) ([IronPan](https://github.com/IronPan))
+- \[Sample\] Align preload TFX sample with TFX head [\#2526](https://github.com/kubeflow/pipelines/pull/2526) ([numerology](https://github.com/numerology))
+- SDK - Compiler - Fixed failures on Jinja placeholders [\#2522](https://github.com/kubeflow/pipelines/pull/2522) ([Ark-kun](https://github.com/Ark-kun))
+- \[Test\] Fix TFX related Travis tests [\#2521](https://github.com/kubeflow/pipelines/pull/2521) ([numerology](https://github.com/numerology))
+- update location for logo [\#2520](https://github.com/kubeflow/pipelines/pull/2520) ([IronPan](https://github.com/IronPan))
+- \[MKP\] Reduce the logo image size. [\#2519](https://github.com/kubeflow/pipelines/pull/2519) ([numerology](https://github.com/numerology))
+- Frontend - Added support for https artifact links [\#2517](https://github.com/kubeflow/pipelines/pull/2517) ([Ark-kun](https://github.com/Ark-kun))
+- Pin tensorboard version to 1.13.2 [\#2513](https://github.com/kubeflow/pipelines/pull/2513) ([IronPan](https://github.com/IronPan))
+- clean up viewer crd spec [\#2511](https://github.com/kubeflow/pipelines/pull/2511) ([IronPan](https://github.com/IronPan))
+- remove unnecessary namespace in UI rolebinding spec [\#2510](https://github.com/kubeflow/pipelines/pull/2510) ([IronPan](https://github.com/IronPan))
+- \[UI\] Fix metadata tabs loading state [\#2508](https://github.com/kubeflow/pipelines/pull/2508) ([Bobgy](https://github.com/Bobgy))
+- Regenerate api since https://github.com/kubeflow/pipelines/pull/2445 changed api proto [\#2506](https://github.com/kubeflow/pipelines/pull/2506) ([jingzhang36](https://github.com/jingzhang36))
+- \[Sample\] Replace deprecated KubeflowRunner in TFX sample [\#2499](https://github.com/kubeflow/pipelines/pull/2499) ([numerology](https://github.com/numerology))
+- Samples - Renamed component build to container build [\#2496](https://github.com/kubeflow/pipelines/pull/2496) ([Ark-kun](https://github.com/Ark-kun))
+- \[Frontend\] Enlarge choose pipeline dialog to show more description [\#2494](https://github.com/kubeflow/pipelines/pull/2494) ([Bobgy](https://github.com/Bobgy))
+- update inverse proxy custom permission setting and role setting [\#2493](https://github.com/kubeflow/pipelines/pull/2493) ([rmgogogo](https://github.com/rmgogogo))
+- Fix pipeline description 255 characters length limit [\#2492](https://github.com/kubeflow/pipelines/pull/2492) ([Bobgy](https://github.com/Bobgy))
+- \[MKP\] Fix metadata DB configmap [\#2491](https://github.com/kubeflow/pipelines/pull/2491) ([numerology](https://github.com/numerology))
+- \[Sample\] Add the run ID place holder to TFX sample, fix metadb config in preload sample as well [\#2487](https://github.com/kubeflow/pipelines/pull/2487) ([numerology](https://github.com/numerology))
+- Fix inverse proxy matching regex [\#2486](https://github.com/kubeflow/pipelines/pull/2486) ([IronPan](https://github.com/IronPan))
+- update base image to fix the GKE GC issue for marketplace deployment [\#2484](https://github.com/kubeflow/pipelines/pull/2484) ([IronPan](https://github.com/IronPan))
+- better doc for MKP-KFP deployment [\#2481](https://github.com/kubeflow/pipelines/pull/2481) ([rmgogogo](https://github.com/rmgogogo))
+- \[Frontend\] Fix log viewer cannot scroll horizontally + other minor issues [\#2480](https://github.com/kubeflow/pipelines/pull/2480) ([Bobgy](https://github.com/Bobgy))
+- \[Sample\] Update pre-load TFX::OSS sample [\#2476](https://github.com/kubeflow/pipelines/pull/2476) ([numerology](https://github.com/numerology))
+- SDK - Python components - Fixed bug when mixing file outputs with return value outputs [\#2473](https://github.com/kubeflow/pipelines/pull/2473) ([Ark-kun](https://github.com/Ark-kun))
+- Update samples/core/ai\_platform pipeline to follow data dependency [\#2472](https://github.com/kubeflow/pipelines/pull/2472) ([ucdmkt](https://github.com/ucdmkt))
+- Add option to hide tensorboard artifact [\#2466](https://github.com/kubeflow/pipelines/pull/2466) ([hlu09](https://github.com/hlu09))
+- Release notes for 0.1.32 [\#2465](https://github.com/kubeflow/pipelines/pull/2465) ([hongye-sun](https://github.com/hongye-sun))
+- \[Frontend\] Update CONTRIBUTING.md with frontend code style info [\#2464](https://github.com/kubeflow/pipelines/pull/2464) ([Bobgy](https://github.com/Bobgy))
+- \[Frontend\] Check format in travis CI [\#2463](https://github.com/kubeflow/pipelines/pull/2463) ([Bobgy](https://github.com/Bobgy))
+- Format all source files under frontend/src using prettier [\#2462](https://github.com/kubeflow/pipelines/pull/2462) ([Bobgy](https://github.com/Bobgy))
+- \[Frontend\] UI shows specified task display name in PipelineDetail page [\#2459](https://github.com/kubeflow/pipelines/pull/2459) ([Bobgy](https://github.com/Bobgy))
+- clusterrole for pipeline-runner with seldondeployments [\#2458](https://github.com/kubeflow/pipelines/pull/2458) ([MingfeiPan](https://github.com/MingfeiPan))
+- Use string literals for reference resource relation and for reference resource type in frontend [\#2453](https://github.com/kubeflow/pipelines/pull/2453) ([jingzhang36](https://github.com/jingzhang36))
+- SDK - Components - Added type to graph input references [\#2451](https://github.com/kubeflow/pipelines/pull/2451) ([Ark-kun](https://github.com/Ark-kun))
+- Fix documentation for filter.proto [\#2447](https://github.com/kubeflow/pipelines/pull/2447) ([neuromage](https://github.com/neuromage))
+- \[Request for comments\] Add config for yapf and pylintrc [\#2446](https://github.com/kubeflow/pipelines/pull/2446) ([numerology](https://github.com/numerology))
+- Runs and jobs can be created from pipeline version [\#2445](https://github.com/kubeflow/pipelines/pull/2445) ([jingzhang36](https://github.com/jingzhang36))
+- Fix CustomTable.tsx layout problems [\#2444](https://github.com/kubeflow/pipelines/pull/2444) ([Bobgy](https://github.com/Bobgy))
+- Add --bind\_all option for tensorboard [\#2441](https://github.com/kubeflow/pipelines/pull/2441) ([daikeshi](https://github.com/daikeshi))
+- \[Test\] Fix post-submit test [\#2439](https://github.com/kubeflow/pipelines/pull/2439) ([numerology](https://github.com/numerology))
+- SDK - Client - Makes the create\_run output nicer [\#2438](https://github.com/kubeflow/pipelines/pull/2438) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Setup - Added cloudpickle to requirements [\#2437](https://github.com/kubeflow/pipelines/pull/2437) ([Ark-kun](https://github.com/Ark-kun))
+- Add owner files for marketplace deployment [\#2436](https://github.com/kubeflow/pipelines/pull/2436) ([IronPan](https://github.com/IronPan))
+- Update backend OWNERS [\#2435](https://github.com/kubeflow/pipelines/pull/2435) ([IronPan](https://github.com/IronPan))
+- Diagnose me dev env [\#2425](https://github.com/kubeflow/pipelines/pull/2425) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- \[Doc\] Update permission requirement in README [\#2422](https://github.com/kubeflow/pipelines/pull/2422) ([numerology](https://github.com/numerology))
+- Adding the core libraries for diagnose\_me tool. [\#2417](https://github.com/kubeflow/pipelines/pull/2417) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- \[Doc\] Minor doc update [\#2394](https://github.com/kubeflow/pipelines/pull/2394) ([numerology](https://github.com/numerology))
+- update link so that user can easily jump to CAIP after deployment [\#2377](https://github.com/kubeflow/pipelines/pull/2377) ([rmgogogo](https://github.com/rmgogogo))
+- \[Frontend\] Make links in pipeline description clickable [\#2376](https://github.com/kubeflow/pipelines/pull/2376) ([Bobgy](https://github.com/Bobgy))
+- \[Doc\] Fix a typo in MKP guide [\#2342](https://github.com/kubeflow/pipelines/pull/2342) ([numerology](https://github.com/numerology))
+- SDK/CLI: Implement kfp pipeline group [\#2340](https://github.com/kubeflow/pipelines/pull/2340) ([elikatsis](https://github.com/elikatsis))
+- SDK - Tests - Fixed most of the test warnings [\#2336](https://github.com/kubeflow/pipelines/pull/2336) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Refactoring - Split the K8sHelper class [\#2333](https://github.com/kubeflow/pipelines/pull/2333) ([Ark-kun](https://github.com/Ark-kun))
+- tech writer edits [\#2332](https://github.com/kubeflow/pipelines/pull/2332) ([jay-saldanha](https://github.com/jay-saldanha))
+- Tests - Use base image for frontend tests [\#190](https://github.com/kubeflow/pipelines/pull/190) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.32](https://github.com/kubeflow/pipelines/tree/0.1.32) (2019-10-18)
+
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.31...0.1.32)
+
+**Merged pull requests:**
+
+- Update sdk release version [\#2434](https://github.com/kubeflow/pipelines/pull/2434) ([hongye-sun](https://github.com/hongye-sun))
+- Release e9b96de317989a9673ef88d88fb9dab9dac3005f [\#2433](https://github.com/kubeflow/pipelines/pull/2433) ([hongye-sun](https://github.com/hongye-sun))
+- \[Frontend\] Configure tslint for better DX [\#2431](https://github.com/kubeflow/pipelines/pull/2431) ([Bobgy](https://github.com/Bobgy))
+- \[Frontend\] Upgrade typescript to 3.6 [\#2428](https://github.com/kubeflow/pipelines/pull/2428) ([Bobgy](https://github.com/Bobgy))
+- SDK - DSL - Make is\_exit\_handler unnecessary in ContainerOp [\#2411](https://github.com/kubeflow/pipelines/pull/2411) ([Ark-kun](https://github.com/Ark-kun))
+- \[Frontend\] Prettier config to be consistent with existing code style [\#2409](https://github.com/kubeflow/pipelines/pull/2409) ([Bobgy](https://github.com/Bobgy))
+- tech writer edits [\#2403](https://github.com/kubeflow/pipelines/pull/2403) ([jay-saldanha](https://github.com/jay-saldanha))
+- \[Test/Sample test\] Fix model version in AI platform sample [\#2400](https://github.com/kubeflow/pipelines/pull/2400) ([numerology](https://github.com/numerology))
+- Update Watson ML default framework version [\#2398](https://github.com/kubeflow/pipelines/pull/2398) ([Tomcli](https://github.com/Tomcli))
+- Add Tomcli as kfs component reviewer [\#2396](https://github.com/kubeflow/pipelines/pull/2396) ([Tomcli](https://github.com/Tomcli))
+- License crawler for third party golang libraries [\#2393](https://github.com/kubeflow/pipelines/pull/2393) ([Bobgy](https://github.com/Bobgy))
+- quick custom spec fix [\#2390](https://github.com/kubeflow/pipelines/pull/2390) ([animeshsingh](https://github.com/animeshsingh))
+- add test config comment [\#2389](https://github.com/kubeflow/pipelines/pull/2389) ([gaoning777](https://github.com/gaoning777))
+- Remove jingzhang36 and rmgogogo from frontend reviewer list [\#2388](https://github.com/kubeflow/pipelines/pull/2388) ([Bobgy](https://github.com/Bobgy))
+- enable the check for dataflow [\#2387](https://github.com/kubeflow/pipelines/pull/2387) ([gaoning777](https://github.com/gaoning777))
+- Update samples/core/tfx-oss to tfx==0.14.0 and kfp=0.1.31 [\#2385](https://github.com/kubeflow/pipelines/pull/2385) ([ucdmkt](https://github.com/ucdmkt))
+- \[Sample\] Add back visualization in XGBoost sample [\#2384](https://github.com/kubeflow/pipelines/pull/2384) ([numerology](https://github.com/numerology))
+- move favicon path, root is overridden by inverse proxy [\#2382](https://github.com/kubeflow/pipelines/pull/2382) ([rmgogogo](https://github.com/rmgogogo))
+- \[Frontend\] Script and documentation to start a frontend dev env that works with all API endpoints [\#2381](https://github.com/kubeflow/pipelines/pull/2381) ([Bobgy](https://github.com/Bobgy))
+- add animesh to the approvers of the kfserving [\#2380](https://github.com/kubeflow/pipelines/pull/2380) ([gaoning777](https://github.com/gaoning777))
+- SDK - Added version [\#2374](https://github.com/kubeflow/pipelines/pull/2374) ([Ark-kun](https://github.com/Ark-kun))
+- tech writer edits [\#2373](https://github.com/kubeflow/pipelines/pull/2373) ([jay-saldanha](https://github.com/jay-saldanha))
+- \[Samples\] Add numerology as samples/OWNERS [\#2371](https://github.com/kubeflow/pipelines/pull/2371) ([numerology](https://github.com/numerology))
+- \[Frontend\] Fix cannot copy logs in LogViewer when scrolling [\#2370](https://github.com/kubeflow/pipelines/pull/2370) ([Bobgy](https://github.com/Bobgy))
+- KFServing move to v1alpha2 [\#2369](https://github.com/kubeflow/pipelines/pull/2369) ([animeshsingh](https://github.com/animeshsingh))
+- Components - Updated the gcp dataproc create\_cluster component image [\#2366](https://github.com/kubeflow/pipelines/pull/2366) ([Ark-kun](https://github.com/Ark-kun))
+- \[Doc\] Fix some description of preload samples [\#2361](https://github.com/kubeflow/pipelines/pull/2361) ([numerology](https://github.com/numerology))
+- \[Sample\] Improve tfx oss sample [\#2360](https://github.com/kubeflow/pipelines/pull/2360) ([numerology](https://github.com/numerology))
+- add cloud-platform scope in the test to reclaim the ai platform sample models [\#2355](https://github.com/kubeflow/pipelines/pull/2355) ([gaoning777](https://github.com/gaoning777))
+- Fix potential issue of the ai platform sample when running it in the ai platform notebook [\#2349](https://github.com/kubeflow/pipelines/pull/2349) ([gaoning777](https://github.com/gaoning777))
+- Typo in Data passing in python components.ipynb [\#2347](https://github.com/kubeflow/pipelines/pull/2347) ([pingsutw](https://github.com/pingsutw))
+- \[Test\] Add unittest against TFX [\#2346](https://github.com/kubeflow/pipelines/pull/2346) ([numerology](https://github.com/numerology))
+- SDK - Python components - Fixed handling multiline decorators [\#2345](https://github.com/kubeflow/pipelines/pull/2345) ([Ark-kun](https://github.com/Ark-kun))
+- \[License\] Fix third-party license [\#2344](https://github.com/kubeflow/pipelines/pull/2344) ([numerology](https://github.com/numerology))
+- Fix pipeline cannot run bug when using marketplace managed storage [\#2341](https://github.com/kubeflow/pipelines/pull/2341) ([Bobgy](https://github.com/Bobgy))
+- enlarge MKP cluster constraint [\#2339](https://github.com/kubeflow/pipelines/pull/2339) ([rmgogogo](https://github.com/rmgogogo))
+- Add pipeline version api methods [\#2338](https://github.com/kubeflow/pipelines/pull/2338) ([jingzhang36](https://github.com/jingzhang36))
+- tech writer edits [\#2331](https://github.com/kubeflow/pipelines/pull/2331) ([jay-saldanha](https://github.com/jay-saldanha))
+- Add sample test for multiple output [\#2328](https://github.com/kubeflow/pipelines/pull/2328) ([gaoning777](https://github.com/gaoning777))
+- add ai\_platform test [\#2327](https://github.com/kubeflow/pipelines/pull/2327) ([gaoning777](https://github.com/gaoning777))
+- Tests - When testing SDK install it using pip [\#2325](https://github.com/kubeflow/pipelines/pull/2325) ([Ark-kun](https://github.com/Ark-kun))
+- tech writer edits [\#2324](https://github.com/kubeflow/pipelines/pull/2324) ([jay-saldanha](https://github.com/jay-saldanha))
+- SDK - Compiler - Added the component spec annotations to the compiled workflow [\#2323](https://github.com/kubeflow/pipelines/pull/2323) ([Ark-kun](https://github.com/Ark-kun))
+- \[SDK/Compiler\] Add \_create\_and\_write\_workflow method [\#2321](https://github.com/kubeflow/pipelines/pull/2321) ([numerology](https://github.com/numerology))
+- \[Sample\] Add new TFX::OSS sample [\#2319](https://github.com/kubeflow/pipelines/pull/2319) ([numerology](https://github.com/numerology))
+- SDK - Containers - Made python package installation more robust [\#2316](https://github.com/kubeflow/pipelines/pull/2316) ([Ark-kun](https://github.com/Ark-kun))
+- Extend KFServing component with autoscaling and server mode [\#2315](https://github.com/kubeflow/pipelines/pull/2315) ([Tomcli](https://github.com/Tomcli))
+- SDK - Tests - Test creating component from the real AutoML pipeline [\#2314](https://github.com/kubeflow/pipelines/pull/2314) ([Ark-kun](https://github.com/Ark-kun))
+- tech writer edits [\#2313](https://github.com/kubeflow/pipelines/pull/2313) ([jay-saldanha](https://github.com/jay-saldanha))
+- \[MKP/doc\] Update doc for changes of service account credential. [\#2309](https://github.com/kubeflow/pipelines/pull/2309) ([numerology](https://github.com/numerology))
+- \[MKP\] Remove service account credential from deployment page. [\#2308](https://github.com/kubeflow/pipelines/pull/2308) ([numerology](https://github.com/numerology))
+- SDK/DSL: ContainerOp.add\_pvolume - Fix volume passed in add\_volume [\#2306](https://github.com/kubeflow/pipelines/pull/2306) ([elikatsis](https://github.com/elikatsis))
+- \[Frontend\] Node detail view now can show workflow input/output artifacts [\#2305](https://github.com/kubeflow/pipelines/pull/2305) ([eterna2](https://github.com/eterna2))
+- SDK - Compiler - Fixed deprecation warning when calling compile [\#2303](https://github.com/kubeflow/pipelines/pull/2303) ([Ark-kun](https://github.com/Ark-kun))
+- tech writer edits [\#2301](https://github.com/kubeflow/pipelines/pull/2301) ([jay-saldanha](https://github.com/jay-saldanha))
+- \[Component\] Add VPC Interface Endpoint Support for SageMaker [\#2299](https://github.com/kubeflow/pipelines/pull/2299) ([RedbackThomson](https://github.com/RedbackThomson))
+- SDK - Compiler - Fix bugs in the data passing rewriter [\#2297](https://github.com/kubeflow/pipelines/pull/2297) ([deepio-oc](https://github.com/deepio-oc))
+- Add CMLE deploy comment missing parameters [\#2296](https://github.com/kubeflow/pipelines/pull/2296) ([hongye-sun](https://github.com/hongye-sun))
+- Samples - Simplified pipeline submission code in samples [\#2293](https://github.com/kubeflow/pipelines/pull/2293) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Client - Added a way to set experiment name using environment variables [\#2292](https://github.com/kubeflow/pipelines/pull/2292) ([Ark-kun](https://github.com/Ark-kun))
+- tech writer edits [\#2291](https://github.com/kubeflow/pipelines/pull/2291) ([jay-saldanha](https://github.com/jay-saldanha))
+- \[MKP\] Fix gcr paths in values.yaml [\#2289](https://github.com/kubeflow/pipelines/pull/2289) ([numerology](https://github.com/numerology))
+- fix for MKP [\#2288](https://github.com/kubeflow/pipelines/pull/2288) ([rmgogogo](https://github.com/rmgogogo))
+- tech writer edits [\#2285](https://github.com/kubeflow/pipelines/pull/2285) ([jay-saldanha](https://github.com/jay-saldanha))
+- Disable cloudsql and update to v0.1.31 [\#2284](https://github.com/kubeflow/pipelines/pull/2284) ([rmgogogo](https://github.com/rmgogogo))
+- tech writer edits [\#2282](https://github.com/kubeflow/pipelines/pull/2282) ([jay-saldanha](https://github.com/jay-saldanha))
+- Remove usage of deprecated ContainerOp methods in use\_gcp\_secret [\#2280](https://github.com/kubeflow/pipelines/pull/2280) ([andrewsmartin](https://github.com/andrewsmartin))
+- Samples - Switched the build\_component sample to the new container API [\#2279](https://github.com/kubeflow/pipelines/pull/2279) ([Ark-kun](https://github.com/Ark-kun))
+- Components - Added the "AutoML Export data to GCS" component [\#2278](https://github.com/kubeflow/pipelines/pull/2278) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Deprecated build\_python\_component [\#2277](https://github.com/kubeflow/pipelines/pull/2277) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Containers - Deprecated build\_docker\_image [\#2276](https://github.com/kubeflow/pipelines/pull/2276) ([Ark-kun](https://github.com/Ark-kun))
+- Refactor resource op sample for sample test coverage [\#2274](https://github.com/kubeflow/pipelines/pull/2274) ([numerology](https://github.com/numerology))
+- SDK - Components - Creating graph components from python pipeline function [\#2273](https://github.com/kubeflow/pipelines/pull/2273) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Verify the object type when serializing primitive arguments [\#2272](https://github.com/kubeflow/pipelines/pull/2272) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Compiler - Make it possible to create more portable pipelines [\#2271](https://github.com/kubeflow/pipelines/pull/2271) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Reorganized TaskSpec execution options [\#2270](https://github.com/kubeflow/pipelines/pull/2270) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Fixed small bugs in graph component resolving [\#2269](https://github.com/kubeflow/pipelines/pull/2269) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Notebooks - Deprecated the docker magic [\#2266](https://github.com/kubeflow/pipelines/pull/2266) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - component\_ref.name should only be set when component was loaded by name [\#2265](https://github.com/kubeflow/pipelines/pull/2265) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Added the ComponentSpec.save method [\#2264](https://github.com/kubeflow/pipelines/pull/2264) ([Ark-kun](https://github.com/Ark-kun))
+- Components - Removed trailing whitespace from AutoML components code [\#2263](https://github.com/kubeflow/pipelines/pull/2263) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Restored attribute order when generating component.yaml files [\#2262](https://github.com/kubeflow/pipelines/pull/2262) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Lightweight - Convert the names of file inputs and outputs [\#2260](https://github.com/kubeflow/pipelines/pull/2260) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Compiler - Fixed small bug in data passing rewriter [\#2259](https://github.com/kubeflow/pipelines/pull/2259) ([Ark-kun](https://github.com/Ark-kun))
+- Samples - Added the data passing tutorial [\#2258](https://github.com/kubeflow/pipelines/pull/2258) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Deprecate the get and set methods for default image in favor of plain variable [\#2257](https://github.com/kubeflow/pipelines/pull/2257) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Containers - Getting namespace lazily [\#2256](https://github.com/kubeflow/pipelines/pull/2256) ([Ark-kun](https://github.com/Ark-kun))
+- remove default namespace [\#2250](https://github.com/kubeflow/pipelines/pull/2250) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- SDK - Lightweight - Added package installation support to func\_to\_container\_op [\#2245](https://github.com/kubeflow/pipelines/pull/2245) ([Ark-kun](https://github.com/Ark-kun))
+- SDK: fix label check for ContainerOP entities [\#2243](https://github.com/kubeflow/pipelines/pull/2243) ([solovyevt](https://github.com/solovyevt))
+- Update doc for MKP release [\#2242](https://github.com/kubeflow/pipelines/pull/2242) ([rmgogogo](https://github.com/rmgogogo))
+- Update changelog for release 0.1.31 \(and also for 0.1.30 and 0.1.29, which were not added before\) [\#2232](https://github.com/kubeflow/pipelines/pull/2232) ([jingzhang36](https://github.com/jingzhang36))
+- SDK - Compiler - Move Argo volume specifications to templates [\#2229](https://github.com/kubeflow/pipelines/pull/2229) ([Ark-kun](https://github.com/Ark-kun))
+- Updated README Swagger CodeGen version [\#2228](https://github.com/kubeflow/pipelines/pull/2228) ([RedbackThomson](https://github.com/RedbackThomson))
+- SDK - Components - Fix - Stop serializing string values [\#2227](https://github.com/kubeflow/pipelines/pull/2227) ([Ark-kun](https://github.com/Ark-kun))
+- third\_party/metadata\_envoy: Modify license file [\#2224](https://github.com/kubeflow/pipelines/pull/2224) ([dushyanthsc](https://github.com/dushyanthsc))
+- \[SDK/Client\] Improve the url format check for kfp.Client [\#2222](https://github.com/kubeflow/pipelines/pull/2222) ([numerology](https://github.com/numerology))
+- \[Sample\] update XGBoost sample [\#2220](https://github.com/kubeflow/pipelines/pull/2220) ([numerology](https://github.com/numerology))
+- \[Component\] Add Managed Spot Training Support for SageMaker [\#2219](https://github.com/kubeflow/pipelines/pull/2219) ([RedbackThomson](https://github.com/RedbackThomson))
+- SDK - Containers - Added support for container image cache [\#2216](https://github.com/kubeflow/pipelines/pull/2216) ([Ark-kun](https://github.com/Ark-kun))
+- Add third party license + source code to argo and minio images to comply with their license [\#2201](https://github.com/kubeflow/pipelines/pull/2201) ([Bobgy](https://github.com/Bobgy))
+- SDK - Moved the \_container\_builder from kfp.compiler to kfp.containers [\#2192](https://github.com/kubeflow/pipelines/pull/2192) ([Ark-kun](https://github.com/Ark-kun))
+- Added the backend Go module cache to .gitignore [\#2190](https://github.com/kubeflow/pipelines/pull/2190) ([Ark-kun](https://github.com/Ark-kun))
+- Docs - Added the direct kfp module members to documentation [\#2183](https://github.com/kubeflow/pipelines/pull/2183) ([Ark-kun](https://github.com/Ark-kun))
+- Components - Added AutoML Tables components and tests [\#2174](https://github.com/kubeflow/pipelines/pull/2174) ([Ark-kun](https://github.com/Ark-kun))
+- GUI: should pop up correct info when deleting more than one pipeline [\#2156](https://github.com/kubeflow/pipelines/pull/2156) ([QxiaoQ](https://github.com/QxiaoQ))
+- \[SDK-compiler\] Refactor Compiler to expose an API to write out yaml spec of pipeline. [\#2146](https://github.com/kubeflow/pipelines/pull/2146) ([numerology](https://github.com/numerology))
+- Add necessary data types to api and database to support pipeline version. [\#1873](https://github.com/kubeflow/pipelines/pull/1873) ([jingzhang36](https://github.com/jingzhang36))
+- SDK - Hiding Argo's workflow.uid placeholder behind DSL [\#1683](https://github.com/kubeflow/pipelines/pull/1683) ([Ark-kun](https://github.com/Ark-kun))
+- Allows uploading a pipeline in new run form [\#1643](https://github.com/kubeflow/pipelines/pull/1643) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK - Improve errors when ContainerOp.output is unavailable [\#1578](https://github.com/kubeflow/pipelines/pull/1578) ([Ark-kun](https://github.com/Ark-kun))
+- Use Remote Build Execution for Bazel builds. [\#1031](https://github.com/kubeflow/pipelines/pull/1031) ([neuromage](https://github.com/neuromage))
+
+## [0.1.31](https://github.com/kubeflow/pipelines/tree/0.1.31) (2019-09-25)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.30...0.1.31)
+
+**Merged pull requests:**
+
+- Fix publishing deploy YAML in .release.cloudbuild.yaml [\#2231](https://github.com/kubeflow/pipelines/pull/2231) ([IronPan](https://github.com/IronPan))
+- SDK - Lightweight - Added support for file outputs [\#2221](https://github.com/kubeflow/pipelines/pull/2221) ([Ark-kun](https://github.com/Ark-kun))
+- Add samples for loops [\#2218](https://github.com/kubeflow/pipelines/pull/2218) ([kevinbache](https://github.com/kevinbache))
+- Fix pipeline lite README [\#2217](https://github.com/kubeflow/pipelines/pull/2217) ([Bobgy](https://github.com/Bobgy))
+- fix release cloud build YAML to generate pipeline-lite YAML [\#2214](https://github.com/kubeflow/pipelines/pull/2214) ([IronPan](https://github.com/IronPan))
+- \[Component\] Fix the image version specification in create cluster component. [\#2213](https://github.com/kubeflow/pipelines/pull/2213) ([numerology](https://github.com/numerology))
+- SDK - Components - Fixed serialization of lists and dicts containing `PipelineParam` items [\#2212](https://github.com/kubeflow/pipelines/pull/2212) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Lightweight - Added support for file inputs [\#2207](https://github.com/kubeflow/pipelines/pull/2207) ([Ark-kun](https://github.com/Ark-kun))
+- add support for hard and soft constraint in the preemptible nodepools [\#2205](https://github.com/kubeflow/pipelines/pull/2205) ([gaoning777](https://github.com/gaoning777))
+- fix release CB and update readme [\#2204](https://github.com/kubeflow/pipelines/pull/2204) ([IronPan](https://github.com/IronPan))
+- fix envoy dockerfile [\#2200](https://github.com/kubeflow/pipelines/pull/2200) ([IronPan](https://github.com/IronPan))
+- SDK - Python components - Properly serializing outputs [\#2198](https://github.com/kubeflow/pipelines/pull/2198) ([Ark-kun](https://github.com/Ark-kun))
+- update dockerfile for envoy [\#2197](https://github.com/kubeflow/pipelines/pull/2197) ([IronPan](https://github.com/IronPan))
+- SDK - Removed some dead code [\#2194](https://github.com/kubeflow/pipelines/pull/2194) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Simplified arg-parsing code using argparse.SUPPRESS [\#2193](https://github.com/kubeflow/pipelines/pull/2193) ([Ark-kun](https://github.com/Ark-kun))
+- Update pipeline-lite to v0.1.31 [\#2189](https://github.com/kubeflow/pipelines/pull/2189) ([IronPan](https://github.com/IronPan))
+- Build inverse proxy image as part of the presubmit test [\#2187](https://github.com/kubeflow/pipelines/pull/2187) ([IronPan](https://github.com/IronPan))
+- push pipeline-lite YAML as part of release CB [\#2186](https://github.com/kubeflow/pipelines/pull/2186) ([IronPan](https://github.com/IronPan))
+- Release 57d9f7f1cfd458e945d297957621716062d89a49 [\#2184](https://github.com/kubeflow/pipelines/pull/2184) ([IronPan](https://github.com/IronPan))
+- Docs - Added kfp.containers module [\#2182](https://github.com/kubeflow/pipelines/pull/2182) ([Ark-kun](https://github.com/Ark-kun))
+- Added generated Python SDK documentation to .gitignore [\#2181](https://github.com/kubeflow/pipelines/pull/2181) ([Ark-kun](https://github.com/Ark-kun))
+- metadata: Deployment scripts update [\#2180](https://github.com/kubeflow/pipelines/pull/2180) ([dushyanthsc](https://github.com/dushyanthsc))
+- \[MKP/misc\] Update help info for service account. [\#2179](https://github.com/kubeflow/pipelines/pull/2179) ([numerology](https://github.com/numerology))
+- metadata-envoy: Build script for metadata-envoy docker image [\#2178](https://github.com/kubeflow/pipelines/pull/2178) ([dushyanthsc](https://github.com/dushyanthsc))
+- Testing - Output Argo workflow information when the workflow times out [\#2176](https://github.com/kubeflow/pipelines/pull/2176) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Compiler - Fix large data passing [\#2173](https://github.com/kubeflow/pipelines/pull/2173) ([Ark-kun](https://github.com/Ark-kun))
+- \[Test/sample\] Fix the model version in kubeflow\_tf\_serving sample to avoid potential conflict. [\#2170](https://github.com/kubeflow/pipelines/pull/2170) ([numerology](https://github.com/numerology))
+- Update owner for thirdparty [\#2168](https://github.com/kubeflow/pipelines/pull/2168) ([IronPan](https://github.com/IronPan))
+- Make gs:// links in metadata UI point to GCS console page [\#2167](https://github.com/kubeflow/pipelines/pull/2167) ([Bobgy](https://github.com/Bobgy))
+- Add Bobgy to test infra owner [\#2166](https://github.com/kubeflow/pipelines/pull/2166) ([IronPan](https://github.com/IronPan))
+- Pretty print json value in execution/artifact detail page [\#2165](https://github.com/kubeflow/pipelines/pull/2165) ([Bobgy](https://github.com/Bobgy))
+- Update backend owner file [\#2164](https://github.com/kubeflow/pipelines/pull/2164) ([IronPan](https://github.com/IronPan))
+- add kubeflow serving sample test [\#2163](https://github.com/kubeflow/pipelines/pull/2163) ([gaoning777](https://github.com/gaoning777))
+- SDK - Components - Fixed the output types for outputs with converted names [\#2162](https://github.com/kubeflow/pipelines/pull/2162) ([Ark-kun](https://github.com/Ark-kun))
+- Remove dataflow components [\#2161](https://github.com/kubeflow/pipelines/pull/2161) ([gaoning777](https://github.com/gaoning777))
+- remove tfx-taxi sample in favor of the tfx oss sample [\#2160](https://github.com/kubeflow/pipelines/pull/2160) ([gaoning777](https://github.com/gaoning777))
+- Fix more typos in KFP marketplace application [\#2158](https://github.com/kubeflow/pipelines/pull/2158) ([IronPan](https://github.com/IronPan))
+- Add URI to artifact details page [\#2157](https://github.com/kubeflow/pipelines/pull/2157) ([Bobgy](https://github.com/Bobgy))
+- Execution detail page shows inputs and outputs [\#2155](https://github.com/kubeflow/pipelines/pull/2155) ([Bobgy](https://github.com/Bobgy))
+- Utils to convert metadata api from callback paradigm to promise paradigm [\#2153](https://github.com/kubeflow/pipelines/pull/2153) ([Bobgy](https://github.com/Bobgy))
+- Fix presubmit test failure -- build job in QUEUED state [\#2152](https://github.com/kubeflow/pipelines/pull/2152) ([Bobgy](https://github.com/Bobgy))
+- Removing the unnecessary trunk when generating the DB name [\#2151](https://github.com/kubeflow/pipelines/pull/2151) ([IronPan](https://github.com/IronPan))
+- add preemptible vm and volume snapshot op sample tests [\#2149](https://github.com/kubeflow/pipelines/pull/2149) ([gaoning777](https://github.com/gaoning777))
+- Tests - Samples - Added the component\_build sample test [\#2147](https://github.com/kubeflow/pipelines/pull/2147) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Testing - Fix metadata comparison instability [\#2145](https://github.com/kubeflow/pipelines/pull/2145) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Containers - Raise exception on job failure [\#2144](https://github.com/kubeflow/pipelines/pull/2144) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Fixed build\_python\_component [\#2143](https://github.com/kubeflow/pipelines/pull/2143) ([Ark-kun](https://github.com/Ark-kun))
+- add artifact location tests [\#2142](https://github.com/kubeflow/pipelines/pull/2142) ([gaoning777](https://github.com/gaoning777))
+- add retry tests [\#2141](https://github.com/kubeflow/pipelines/pull/2141) ([gaoning777](https://github.com/gaoning777))
+- Use latest Tensorflow image for Tensorboard. [\#2140](https://github.com/kubeflow/pipelines/pull/2140) ([neuromage](https://github.com/neuromage))
+- Small fixes to the kfp marketplace [\#2138](https://github.com/kubeflow/pipelines/pull/2138) ([IronPan](https://github.com/IronPan))
+- SDK - Fix pipeline metadata serialization [\#2137](https://github.com/kubeflow/pipelines/pull/2137) ([Ark-kun](https://github.com/Ark-kun))
+- Remove envoy related from root directory [\#2136](https://github.com/kubeflow/pipelines/pull/2136) ([IronPan](https://github.com/IronPan))
+- Fix missing execution name in execution list/detail page. [\#2135](https://github.com/kubeflow/pipelines/pull/2135) ([Bobgy](https://github.com/Bobgy))
+- SDK - Persisting all output values [\#2134](https://github.com/kubeflow/pipelines/pull/2134) ([Ark-kun](https://github.com/Ark-kun))
+- Fix the test name of image pull secrets sample test. [\#2132](https://github.com/kubeflow/pipelines/pull/2132) ([numerology](https://github.com/numerology))
+- Fix permission issue running GCP pipelines [\#2129](https://github.com/kubeflow/pipelines/pull/2129) ([IronPan](https://github.com/IronPan))
+- Add documentation for Marketplace CLI deployment [\#2128](https://github.com/kubeflow/pipelines/pull/2128) ([IronPan](https://github.com/IronPan))
+- Fix execution detail page fetch params. [\#2127](https://github.com/kubeflow/pipelines/pull/2127) ([Bobgy](https://github.com/Bobgy))
+- Workaround error thrown from metadata server when there is no record [\#2126](https://github.com/kubeflow/pipelines/pull/2126) ([Bobgy](https://github.com/Bobgy))
+- Update documentations for Kubeflow Pipelines marketplace [\#2125](https://github.com/kubeflow/pipelines/pull/2125) ([IronPan](https://github.com/IronPan))
+- Update gitignore to ignore all .iml files [\#2123](https://github.com/kubeflow/pipelines/pull/2123) ([IronPan](https://github.com/IronPan))
+- A few fixes for Marketplace manifests [\#2122](https://github.com/kubeflow/pipelines/pull/2122) ([IronPan](https://github.com/IronPan))
+- SDK - Python components - Fixed the base\_image default value [\#2119](https://github.com/kubeflow/pipelines/pull/2119) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Fixed kfp.components.set\_default\_base\_image [\#2118](https://github.com/kubeflow/pipelines/pull/2118) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Started to explicitly import submodules into kfp namespace [\#2117](https://github.com/kubeflow/pipelines/pull/2117) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Containers - Fixed kfp.containers.get\_default\_image\_builder [\#2116](https://github.com/kubeflow/pipelines/pull/2116) ([Ark-kun](https://github.com/Ark-kun))
+- Add execution order sample test [\#2114](https://github.com/kubeflow/pipelines/pull/2114) ([numerology](https://github.com/numerology))
+- Add imagepullsecrets sample into sample test [\#2113](https://github.com/kubeflow/pipelines/pull/2113) ([numerology](https://github.com/numerology))
+- Add a convention to make sure experiment\_name is parameterized in notebook sample. [\#2112](https://github.com/kubeflow/pipelines/pull/2112) ([numerology](https://github.com/numerology))
+- update kustomize\(for 0.1.30 release\) [\#2111](https://github.com/kubeflow/pipelines/pull/2111) ([gaoning777](https://github.com/gaoning777))
+- fix bug: list.pop\(\) is not expecting keyword arg [\#2107](https://github.com/kubeflow/pipelines/pull/2107) ([gaoning777](https://github.com/gaoning777))
+- Add sidecar sample test [\#2106](https://github.com/kubeflow/pipelines/pull/2106) ([gaoning777](https://github.com/gaoning777))
+- Artifact list column creation time [\#2105](https://github.com/kubeflow/pipelines/pull/2105) ([Bobgy](https://github.com/Bobgy))
+- Add script that automatically proxies backend servers for frontend dev server [\#2104](https://github.com/kubeflow/pipelines/pull/2104) ([Bobgy](https://github.com/Bobgy))
+- kfserving pipeline update [\#2102](https://github.com/kubeflow/pipelines/pull/2102) ([animeshsingh](https://github.com/animeshsingh))
+- Release 1449d08aeeeb47731d019ea046d90904d9c77953 [\#2099](https://github.com/kubeflow/pipelines/pull/2099) ([gaoning777](https://github.com/gaoning777))
+- add dataflow sample with the new GCP component [\#2096](https://github.com/kubeflow/pipelines/pull/2096) ([gaoning777](https://github.com/gaoning777))
+- pipeline-lite/metadata: Update readme [\#2093](https://github.com/kubeflow/pipelines/pull/2093) ([dushyanthsc](https://github.com/dushyanthsc))
+- Close the streams after getting gcloud auth token [\#2084](https://github.com/kubeflow/pipelines/pull/2084) ([IronPan](https://github.com/IronPan))
+- add us to OWNERS for SHA [\#2047](https://github.com/kubeflow/pipelines/pull/2047) ([rmgogogo](https://github.com/rmgogogo))
+- WithParams [\#2044](https://github.com/kubeflow/pipelines/pull/2044) ([kevinbache](https://github.com/kevinbache))
+- SDK - Lightweight - Made wrapper code compatible with python2 [\#2035](https://github.com/kubeflow/pipelines/pull/2035) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Refactoring - Replaced the \*Meta classes with the \*Spec classes [\#1944](https://github.com/kubeflow/pipelines/pull/1944) ([Ark-kun](https://github.com/Ark-kun))
+- configure db host and port from config file [\#1940](https://github.com/kubeflow/pipelines/pull/1940) ([xaniasd](https://github.com/xaniasd))
+- SDK - Containers - Do not create GCS bucket unless building the image [\#1938](https://github.com/kubeflow/pipelines/pull/1938) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.30](https://github.com/kubeflow/pipelines/tree/0.1.30) (2019-09-13)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.29...0.1.30)
+
+**Merged pull requests:**
+
+- update sdk versions [\#2100](https://github.com/kubeflow/pipelines/pull/2100) ([gaoning777](https://github.com/gaoning777))
+- Fix the logic of passing default values of pipeline parameters. [\#2098](https://github.com/kubeflow/pipelines/pull/2098) ([numerology](https://github.com/numerology))
+- feature parity between notebook sample and normal sample [\#2095](https://github.com/kubeflow/pipelines/pull/2095) ([gaoning777](https://github.com/gaoning777))
+- Remove broken environment variables in namespace install [\#2087](https://github.com/kubeflow/pipelines/pull/2087) ([IronPan](https://github.com/IronPan))
+- gcp-marketplace: Updating metadata deployment to use gRPC server [\#2083](https://github.com/kubeflow/pipelines/pull/2083) ([dushyanthsc](https://github.com/dushyanthsc))
+- Expose an API for appending params/names/descriptions in a programmable way. [\#2082](https://github.com/kubeflow/pipelines/pull/2082) ([numerology](https://github.com/numerology))
+- Expose DB username to marketplace schema.yaml [\#2079](https://github.com/kubeflow/pipelines/pull/2079) ([IronPan](https://github.com/IronPan))
+- Use GCSHelper to upload test result [\#2078](https://github.com/kubeflow/pipelines/pull/2078) ([numerology](https://github.com/numerology))
+- Fix sample test result upload failure. [\#2077](https://github.com/kubeflow/pipelines/pull/2077) ([numerology](https://github.com/numerology))
+- Add cmle tpu sample link [\#2074](https://github.com/kubeflow/pipelines/pull/2074) ([gaoning777](https://github.com/gaoning777))
+- Make wget quieter [\#2069](https://github.com/kubeflow/pipelines/pull/2069) ([Ark-kun](https://github.com/Ark-kun))
+- Store DB password as K8s secret for marketplace deployment [\#2067](https://github.com/kubeflow/pipelines/pull/2067) ([IronPan](https://github.com/IronPan))
+- Change object store naming for marketplace deployment [\#2066](https://github.com/kubeflow/pipelines/pull/2066) ([IronPan](https://github.com/IronPan))
+- fixes name inconsistency for marketplace manifest and bump kfp version [\#2065](https://github.com/kubeflow/pipelines/pull/2065) ([IronPan](https://github.com/IronPan))
+- Release - Fixed the deprecated component image build [\#2063](https://github.com/kubeflow/pipelines/pull/2063) ([Ark-kun](https://github.com/Ark-kun))
+- Release - Creating the release branch from the specified commit [\#2062](https://github.com/kubeflow/pipelines/pull/2062) ([Ark-kun](https://github.com/Ark-kun))
+- manifests/metadata: Metadata server invocation change [\#2041](https://github.com/kubeflow/pipelines/pull/2041) ([dushyanthsc](https://github.com/dushyanthsc))
+- Improve notebook check automation [\#2040](https://github.com/kubeflow/pipelines/pull/2040) ([numerology](https://github.com/numerology))
+- Add parameter for service account credential [\#2039](https://github.com/kubeflow/pipelines/pull/2039) ([IronPan](https://github.com/IronPan))
+- SDK - Testing - Run some unit-tests in a more correct way [\#2036](https://github.com/kubeflow/pipelines/pull/2036) ([Ark-kun](https://github.com/Ark-kun))
+- Add warning message when there are empty parameters in NewRun.tsx [\#2016](https://github.com/kubeflow/pipelines/pull/2016) ([ajchili](https://github.com/ajchili))
+- Fix bug where source and variables are not accessible to visualization [\#2012](https://github.com/kubeflow/pipelines/pull/2012) ([ajchili](https://github.com/ajchili))
+- SDK - Stop adding empty descriptions and inputs [\#1969](https://github.com/kubeflow/pipelines/pull/1969) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.29](https://github.com/kubeflow/pipelines/tree/0.1.29) (2019-09-07)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.27...0.1.29)
+
+**Merged pull requests:**
+
+- Add execution list and details pages [\#2059](https://github.com/kubeflow/pipelines/pull/2059) ([rileyjbauer](https://github.com/rileyjbauer))
+- update sample readme for the new structure [\#2058](https://github.com/kubeflow/pipelines/pull/2058) ([gaoning777](https://github.com/gaoning777))
+- Adds Metadata Artifacts to UI [\#2057](https://github.com/kubeflow/pipelines/pull/2057) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK - Compiler - Failing when PipelineParam is unresolved [\#2055](https://github.com/kubeflow/pipelines/pull/2055) ([Ark-kun](https://github.com/Ark-kun))
+- Removing ResNet-CMLE sample [\#2053](https://github.com/kubeflow/pipelines/pull/2053) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- Refactor kfp.compiler for better modularity [\#2052](https://github.com/kubeflow/pipelines/pull/2052) ([numerology](https://github.com/numerology))
+- SDK - Compiler - Stopped adding mlpipeline artifacts to every compiled template [\#2046](https://github.com/kubeflow/pipelines/pull/2046) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Hiding signature attribute from CloudPickle [\#2045](https://github.com/kubeflow/pipelines/pull/2045) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Compiler - Fixed dsl.Condition when parameter resolves to string with spaces [\#2043](https://github.com/kubeflow/pipelines/pull/2043) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Compiler - Fixed handling of PipelineParams in artifact arguments [\#2042](https://github.com/kubeflow/pipelines/pull/2042) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Testing - Make dsl and compiler tests discoverable by unittest [\#2038](https://github.com/kubeflow/pipelines/pull/2038) ([Ark-kun](https://github.com/Ark-kun))
+- Fix Local Development Quickstart sample [\#2037](https://github.com/kubeflow/pipelines/pull/2037) ([numerology](https://github.com/numerology))
+- SDK - Refactoring - Reduced the usage of dsl.Pipeline context [\#2034](https://github.com/kubeflow/pipelines/pull/2034) ([Ark-kun](https://github.com/Ark-kun))
+- move old gcp components to deprecated folder [\#2031](https://github.com/kubeflow/pipelines/pull/2031) ([gaoning777](https://github.com/gaoning777))
+- add more core samples in the sample test [\#2030](https://github.com/kubeflow/pipelines/pull/2030) ([gaoning777](https://github.com/gaoning777))
+- Add guideline for including a sample in sample test [\#2026](https://github.com/kubeflow/pipelines/pull/2026) ([numerology](https://github.com/numerology))
+- SDK: fix setting pipeline-wide artifact\_location for ResourceOp and VolumeOp classes and add description field for create\_experiment\(\) function [\#2025](https://github.com/kubeflow/pipelines/pull/2025) ([solovyevt](https://github.com/solovyevt))
+- update service account for non managed storage case [\#2023](https://github.com/kubeflow/pipelines/pull/2023) ([IronPan](https://github.com/IronPan))
+- update release cb yaml [\#2022](https://github.com/kubeflow/pipelines/pull/2022) ([IronPan](https://github.com/IronPan))
+- Add readme and update application parameter [\#2021](https://github.com/kubeflow/pipelines/pull/2021) ([IronPan](https://github.com/IronPan))
+- Extend E2E tests: Add VolumeOp test [\#2019](https://github.com/kubeflow/pipelines/pull/2019) ([elikatsis](https://github.com/elikatsis))
+- SDK/DSL: Fix bug when using PipelineParam in `pvc` of PipelineVolume [\#2018](https://github.com/kubeflow/pipelines/pull/2018) ([elikatsis](https://github.com/elikatsis))
+- api: fix generate\_api.sh script with bash shebang [\#2017](https://github.com/kubeflow/pipelines/pull/2017) ([yanniszark](https://github.com/yanniszark))
+- SDK - Components - Enable loading graph components [\#2010](https://github.com/kubeflow/pipelines/pull/2010) ([Ark-kun](https://github.com/Ark-kun))
+- Change schema.yaml to v2 [\#2009](https://github.com/kubeflow/pipelines/pull/2009) ([IronPan](https://github.com/IronPan))
+- fix cb for release [\#2008](https://github.com/kubeflow/pipelines/pull/2008) ([IronPan](https://github.com/IronPan))
+- speed up cb for merged pr [\#2007](https://github.com/kubeflow/pipelines/pull/2007) ([IronPan](https://github.com/IronPan))
+- remove the bigquery components to favor the new gcp components [\#2006](https://github.com/kubeflow/pipelines/pull/2006) ([gaoning777](https://github.com/gaoning777))
+- update release CB to copy images to marketplace compliant location [\#2005](https://github.com/kubeflow/pipelines/pull/2005) ([IronPan](https://github.com/IronPan))
+- Changed isVisualizationServiceAlive implementation to not block API server startup [\#2004](https://github.com/kubeflow/pipelines/pull/2004) ([ajchili](https://github.com/ajchili))
+- Improve visualization server docker image [\#2003](https://github.com/kubeflow/pipelines/pull/2003) ([ajchili](https://github.com/ajchili))
+- Fix ROC Curve visualization argument placeholder [\#2002](https://github.com/kubeflow/pipelines/pull/2002) ([ajchili](https://github.com/ajchili))
+- Add schema validation for sample test config yaml [\#2000](https://github.com/kubeflow/pipelines/pull/2000) ([numerology](https://github.com/numerology))
+- pass pipeline runner service account to api server [\#1996](https://github.com/kubeflow/pipelines/pull/1996) ([IronPan](https://github.com/IronPan))
+- SDK - Components - Added type to TaskOutputReference [\#1995](https://github.com/kubeflow/pipelines/pull/1995) ([Ark-kun](https://github.com/Ark-kun))
+- add connection for inverse proxy [\#1993](https://github.com/kubeflow/pipelines/pull/1993) ([IronPan](https://github.com/IronPan))
+- SDK - Components - Added output references to TaskSpec [\#1991](https://github.com/kubeflow/pipelines/pull/1991) ([Ark-kun](https://github.com/Ark-kun))
+- Testing - Fixed the comparison bug in presubmit test script [\#1990](https://github.com/kubeflow/pipelines/pull/1990) ([Ark-kun](https://github.com/Ark-kun))
+- Print cloud build logs when the task fails [\#1989](https://github.com/kubeflow/pipelines/pull/1989) ([Bobgy](https://github.com/Bobgy))
+- move pipeline runner service account to backend [\#1988](https://github.com/kubeflow/pipelines/pull/1988) ([IronPan](https://github.com/IronPan))
+- Change sample test to read test arguments from yaml config file [\#1987](https://github.com/kubeflow/pipelines/pull/1987) ([numerology](https://github.com/numerology))
+- Disable TFMA visualization [\#1986](https://github.com/kubeflow/pipelines/pull/1986) ([ajchili](https://github.com/ajchili))
+- Refactor tedious redefinition using withItem. [\#1985](https://github.com/kubeflow/pipelines/pull/1985) ([numerology](https://github.com/numerology))
+- Remove service account from helm chart [\#1983](https://github.com/kubeflow/pipelines/pull/1983) ([IronPan](https://github.com/IronPan))
+- SDK - Tests - Fixed bug in the Artifact location test pipeline [\#1982](https://github.com/kubeflow/pipelines/pull/1982) ([Ark-kun](https://github.com/Ark-kun))
+- Add e2e visualization tests [\#1981](https://github.com/kubeflow/pipelines/pull/1981) ([ajchili](https://github.com/ajchili))
+- update kubeflow pipelines application name [\#1980](https://github.com/kubeflow/pipelines/pull/1980) ([IronPan](https://github.com/IronPan))
+- Add generated client from visualization swagger [\#1979](https://github.com/kubeflow/pipelines/pull/1979) ([ajchili](https://github.com/ajchili))
+- Add CRD admin permission to deployer SA [\#1977](https://github.com/kubeflow/pipelines/pull/1977) ([IronPan](https://github.com/IronPan))
+- Remove stdout/stderr from predefined visualization [\#1976](https://github.com/kubeflow/pipelines/pull/1976) ([ajchili](https://github.com/ajchili))
+- Update GCP marketplace deployer dockerfile [\#1975](https://github.com/kubeflow/pipelines/pull/1975) ([IronPan](https://github.com/IronPan))
+- Fix JavaScript and CSS loading issue with table visualization [\#1974](https://github.com/kubeflow/pipelines/pull/1974) ([ajchili](https://github.com/ajchili))
+- manifest/metadata: Switching to use metadata grpc server [\#1973](https://github.com/kubeflow/pipelines/pull/1973) ([dushyanthsc](https://github.com/dushyanthsc))
+- Fix python syntax of TFMA visualization [\#1972](https://github.com/kubeflow/pipelines/pull/1972) ([ajchili](https://github.com/ajchili))
+- SDK - Containers - Build python container image based on current working directory [\#1970](https://github.com/kubeflow/pipelines/pull/1970) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Only yaml component files can be used as source [\#1966](https://github.com/kubeflow/pipelines/pull/1966) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Verifying that the serializer returns string [\#1965](https://github.com/kubeflow/pipelines/pull/1965) ([Ark-kun](https://github.com/Ark-kun))
+- test/project-cleanup - fix [\#1964](https://github.com/kubeflow/pipelines/pull/1964) ([dushyanthsc](https://github.com/dushyanthsc))
+- Refactor run\_sample\_test.py script into python class [\#1963](https://github.com/kubeflow/pipelines/pull/1963) ([numerology](https://github.com/numerology))
+- Add travis CI for new unit test. [\#1960](https://github.com/kubeflow/pipelines/pull/1960) ([numerology](https://github.com/numerology))
+- Add frontend support for Python based visualizations [\#1959](https://github.com/kubeflow/pipelines/pull/1959) ([ajchili](https://github.com/ajchili))
+- Test infra - Fixed the batch Prow tests [\#1958](https://github.com/kubeflow/pipelines/pull/1958) ([Ark-kun](https://github.com/Ark-kun))
+- 1953: added missed target\_image parameter to build\_docker\_image method [\#1955](https://github.com/kubeflow/pipelines/pull/1955) ([pahask8](https://github.com/pahask8))
+- minor fix for tfx oss readme [\#1954](https://github.com/kubeflow/pipelines/pull/1954) ([gaoning777](https://github.com/gaoning777))
+- Bug fix and add unit test for sample-test/utils.file\_injection [\#1952](https://github.com/kubeflow/pipelines/pull/1952) ([numerology](https://github.com/numerology))
+- Fix support for custom visualizations [\#1951](https://github.com/kubeflow/pipelines/pull/1951) ([ajchili](https://github.com/ajchili))
+- Fix the missing argument of re.sub [\#1948](https://github.com/kubeflow/pipelines/pull/1948) ([numerology](https://github.com/numerology))
+- Refactor check\_notebook\_results.py into python module. [\#1947](https://github.com/kubeflow/pipelines/pull/1947) ([numerology](https://github.com/numerology))
+- SDK - Components - Add support for the Base64Pickle type [\#1946](https://github.com/kubeflow/pipelines/pull/1946) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Add support for the List, Dict and Json types [\#1945](https://github.com/kubeflow/pipelines/pull/1945) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Compiler - Deprecated dsl-compile --package [\#1941](https://github.com/kubeflow/pipelines/pull/1941) ([Ark-kun](https://github.com/Ark-kun))
+- Move postsubmit tests to lite deployment [\#1939](https://github.com/kubeflow/pipelines/pull/1939) ([Bobgy](https://github.com/Bobgy))
+- SDK - Components - Setting default base image or image factory [\#1937](https://github.com/kubeflow/pipelines/pull/1937) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Add support for the Boolean type [\#1936](https://github.com/kubeflow/pipelines/pull/1936) ([Ark-kun](https://github.com/Ark-kun))
+- Update changelog for 0.1.27 [\#1935](https://github.com/kubeflow/pipelines/pull/1935) ([hongye-sun](https://github.com/hongye-sun))
+- SDK - Components - Improved serialization and deserialization of arguments and defaults [\#1934](https://github.com/kubeflow/pipelines/pull/1934) ([Ark-kun](https://github.com/Ark-kun))
+- sync namespaced install file [\#1932](https://github.com/kubeflow/pipelines/pull/1932) ([IronPan](https://github.com/IronPan))
+- Add endpoint to allow custom visualizations [\#1931](https://github.com/kubeflow/pipelines/pull/1931) ([ajchili](https://github.com/ajchili))
+- SDK - Refactoring - Replaced the TypeMeta class [\#1930](https://github.com/kubeflow/pipelines/pull/1930) ([Ark-kun](https://github.com/Ark-kun))
+- Add custom visualization support for Python based visualizations [\#1929](https://github.com/kubeflow/pipelines/pull/1929) ([ajchili](https://github.com/ajchili))
+- clean up owner file [\#1928](https://github.com/kubeflow/pipelines/pull/1928) ([IronPan](https://github.com/IronPan))
+- Add pipeline id to pipeline summary card [\#1927](https://github.com/kubeflow/pipelines/pull/1927) ([ajchili](https://github.com/ajchili))
+- Exclude visualization types from flake8 testing [\#1925](https://github.com/kubeflow/pipelines/pull/1925) ([ajchili](https://github.com/ajchili))
+- Use cloud build to build images instead [\#1923](https://github.com/kubeflow/pipelines/pull/1923) ([Bobgy](https://github.com/Bobgy))
+- upgrade backend image versions [\#1918](https://github.com/kubeflow/pipelines/pull/1918) ([hongye-sun](https://github.com/hongye-sun))
+- reduce taxi and xgboost test data to speed up sample test [\#1917](https://github.com/kubeflow/pipelines/pull/1917) ([gaoning777](https://github.com/gaoning777))
+- versioned the 0.13.0 tfx example [\#1912](https://github.com/kubeflow/pipelines/pull/1912) ([gaoning777](https://github.com/gaoning777))
+- Sample test improvement - using python fire to launch sample test [\#1897](https://github.com/kubeflow/pipelines/pull/1897) ([numerology](https://github.com/numerology))
+- SDK - Switching python container components to Lightweight components code generator [\#1889](https://github.com/kubeflow/pipelines/pull/1889) ([Ark-kun](https://github.com/Ark-kun))
+- Support Affinity for ContainerOps [\#1886](https://github.com/kubeflow/pipelines/pull/1886) ([hamedhsn](https://github.com/hamedhsn))
+- WithItems Support [\#1868](https://github.com/kubeflow/pipelines/pull/1868) ([kevinbache](https://github.com/kevinbache))
+- Added README.md for Python based visualizations [\#1853](https://github.com/kubeflow/pipelines/pull/1853) ([ajchili](https://github.com/ajchili))
+- Initial kfserving pipeline component [\#1838](https://github.com/kubeflow/pipelines/pull/1838) ([animeshsingh](https://github.com/animeshsingh))
+- SDK - Components - Added component properties to the task factory function [\#1771](https://github.com/kubeflow/pipelines/pull/1771) ([Ark-kun](https://github.com/Ark-kun))
+- add GCP marketplace application manifest for kubeflow pipelines [\#1621](https://github.com/kubeflow/pipelines/pull/1621) ([IronPan](https://github.com/IronPan))
+- SDK - Added kfp.run\_pipeline\_func\_on\_cluster function \(alias\) [\#1521](https://github.com/kubeflow/pipelines/pull/1521) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Skip attributes with missing values during PipelineMeta serialization [\#1448](https://github.com/kubeflow/pipelines/pull/1448) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Added support for raw input artifact argument values to ContainerOp [\#791](https://github.com/kubeflow/pipelines/pull/791) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.27](https://github.com/kubeflow/pipelines/tree/0.1.27) (2019-08-22)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.26...0.1.27)
+
+**Merged pull requests:**
+
+- update namespaced-install.yaml [\#1926](https://github.com/kubeflow/pipelines/pull/1926) ([IronPan](https://github.com/IronPan))
+- Fix lint related issue [\#1922](https://github.com/kubeflow/pipelines/pull/1922) ([numerology](https://github.com/numerology))
+- Cleanup pipeline-lite deployment [\#1921](https://github.com/kubeflow/pipelines/pull/1921) ([IronPan](https://github.com/IronPan))
+- Allow visualization kernel timeout to be specifiable via environment variables [\#1920](https://github.com/kubeflow/pipelines/pull/1920) ([ajchili](https://github.com/ajchili))
+- Release 151c5349f13bea9d626c988563c04c0a86210c21 [\#1916](https://github.com/kubeflow/pipelines/pull/1916) ([hongye-sun](https://github.com/hongye-sun))
+- cleanup test directory [\#1914](https://github.com/kubeflow/pipelines/pull/1914) ([IronPan](https://github.com/IronPan))
+- SDK - Airflow - Fixed bug in AirFlow op creation [\#1911](https://github.com/kubeflow/pipelines/pull/1911) ([Ark-kun](https://github.com/Ark-kun))
+- Add cloud sql and gcs connection for pipeline-lite deployment [\#1910](https://github.com/kubeflow/pipelines/pull/1910) ([IronPan](https://github.com/IronPan))
+- Enable error propagation from nbconvert to frontend [\#1909](https://github.com/kubeflow/pipelines/pull/1909) ([ajchili](https://github.com/ajchili))
+- remove tfx notebook sample in favor of the TFX OSS sample [\#1908](https://github.com/kubeflow/pipelines/pull/1908) ([gaoning777](https://github.com/gaoning777))
+- \[front-end-server\] Allow viewer:tensorboard podTemplateSpec to be customizable [\#1906](https://github.com/kubeflow/pipelines/pull/1906) ([eterna2](https://github.com/eterna2))
+- Change the type of resource reference payload column [\#1905](https://github.com/kubeflow/pipelines/pull/1905) ([IronPan](https://github.com/IronPan))
+- apiserver: Remove TFX output artifact recording to metadatastore [\#1904](https://github.com/kubeflow/pipelines/pull/1904) ([dushyanthsc](https://github.com/dushyanthsc))
+- remove kubeflow training to favor tfx components [\#1902](https://github.com/kubeflow/pipelines/pull/1902) ([gaoning777](https://github.com/gaoning777))
+- Add TFDV, TFMA, and Table visualization support for Python based visualizations [\#1898](https://github.com/kubeflow/pipelines/pull/1898) ([ajchili](https://github.com/ajchili))
+- Add run with json data as input within fixed-data.ts for UI testing and development [\#1895](https://github.com/kubeflow/pipelines/pull/1895) ([ajchili](https://github.com/ajchili))
+- Use single part as default [\#1893](https://github.com/kubeflow/pipelines/pull/1893) ([IronPan](https://github.com/IronPan))
+- fix unit tests and address some comments [\#1892](https://github.com/kubeflow/pipelines/pull/1892) ([gaoning777](https://github.com/gaoning777))
+- Replace codemirror editor react component with react-ace editor component [\#1890](https://github.com/kubeflow/pipelines/pull/1890) ([ajchili](https://github.com/ajchili))
+- Simplified the build\_docker\_image function [\#1887](https://github.com/kubeflow/pipelines/pull/1887) ([Ark-kun](https://github.com/Ark-kun))
+- IBM Watson samples: from six.moves import xrange [\#1877](https://github.com/kubeflow/pipelines/pull/1877) ([cclauss](https://github.com/cclauss))
+- Undefined name 'e' in openvino [\#1876](https://github.com/kubeflow/pipelines/pull/1876) ([cclauss](https://github.com/cclauss))
+- SDK - Lightweight - Fixed custom types in multi-output case [\#1875](https://github.com/kubeflow/pipelines/pull/1875) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Components - Fixed ModelBase comparison bug [\#1874](https://github.com/kubeflow/pipelines/pull/1874) ([Ark-kun](https://github.com/Ark-kun))
+- Update changelog for 0.1.26 [\#1872](https://github.com/kubeflow/pipelines/pull/1872) ([neuromage](https://github.com/neuromage))
+- Remove copying of tfx data for cloudbuild release steps. [\#1871](https://github.com/kubeflow/pipelines/pull/1871) ([neuromage](https://github.com/neuromage))
+- Update manifests to point to 0.26 release. [\#1870](https://github.com/kubeflow/pipelines/pull/1870) ([neuromage](https://github.com/neuromage))
+- add compile step in the samples to generate zip files [\#1866](https://github.com/kubeflow/pipelines/pull/1866) ([gaoning777](https://github.com/gaoning777))
+- Update Python SDK versions for release. [\#1860](https://github.com/kubeflow/pipelines/pull/1860) ([neuromage](https://github.com/neuromage))
+- test/project-cleanup: Support to cleanup gke-clusters in test project [\#1857](https://github.com/kubeflow/pipelines/pull/1857) ([dushyanthsc](https://github.com/dushyanthsc))
+- Created extensible code editor based on react-ace [\#1855](https://github.com/kubeflow/pipelines/pull/1855) ([ajchili](https://github.com/ajchili))
+- Add visualization-server service to lightweight deployment [\#1844](https://github.com/kubeflow/pipelines/pull/1844) ([ajchili](https://github.com/ajchili))
+- SDK - Tests - Improved the "ContainerOp.set\_retry" test [\#1843](https://github.com/kubeflow/pipelines/pull/1843) ([Ark-kun](https://github.com/Ark-kun))
+- pipeline-lite: Introduce metadata component to pipeline-lite [\#1840](https://github.com/kubeflow/pipelines/pull/1840) ([dushyanthsc](https://github.com/dushyanthsc))
+- \[Bug Fix\] Delete ResourceOp should not have output parameters [\#1822](https://github.com/kubeflow/pipelines/pull/1822) ([eterna2](https://github.com/eterna2))
+- Use KFP lite deployment for presubmit tests [\#1808](https://github.com/kubeflow/pipelines/pull/1808) ([Bobgy](https://github.com/Bobgy))
+- SDK - Fixed string comparisons [\#1756](https://github.com/kubeflow/pipelines/pull/1756) ([Ark-kun](https://github.com/Ark-kun))
+- 'core' folder included to parameters related On-Premise cluster [\#1751](https://github.com/kubeflow/pipelines/pull/1751) ([olegchorny](https://github.com/olegchorny))
+- Refactor presubmit-tests-with-pipeline-deployment.sh to run in other projects [\#1732](https://github.com/kubeflow/pipelines/pull/1732) ([Bobgy](https://github.com/Bobgy))
+- Lint Python code for undefined names [\#1721](https://github.com/kubeflow/pipelines/pull/1721) ([cclauss](https://github.com/cclauss))
+- Let backend apiserver mysql dbname configurable [\#1714](https://github.com/kubeflow/pipelines/pull/1714) ([jwwandy](https://github.com/jwwandy))
+- gcp cred bug fix for multiple credentials in single pipeline [\#1384](https://github.com/kubeflow/pipelines/pull/1384) ([aakashbajaj](https://github.com/aakashbajaj))
+- Collecting coverage when running python tests [\#898](https://github.com/kubeflow/pipelines/pull/898) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.26](https://github.com/kubeflow/pipelines/tree/0.1.26) (2019-08-16)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.25...0.1.26)
+
+**Merged pull requests:**
+
+- update gcloud ml-engine to ai-platform [\#1863](https://github.com/kubeflow/pipelines/pull/1863) ([gaoning777](https://github.com/gaoning777))
+- Release 0517114dc2b365a4a6d95424af6157ead774eff3 [\#1859](https://github.com/kubeflow/pipelines/pull/1859) ([neuromage](https://github.com/neuromage))
+- Reduce getPipeline calls in RunList [\#1852](https://github.com/kubeflow/pipelines/pull/1852) ([rileyjbauer](https://github.com/rileyjbauer))
+- Add back coveralls. [\#1849](https://github.com/kubeflow/pipelines/pull/1849) ([numerology](https://github.com/numerology))
+- Propagate pipeline name in pipeline spec [\#1842](https://github.com/kubeflow/pipelines/pull/1842) ([IronPan](https://github.com/IronPan))
+- Create composite indexes \[ResourceType, ReferenceUUID, ReferenceType\] [\#1836](https://github.com/kubeflow/pipelines/pull/1836) ([IronPan](https://github.com/IronPan))
+- Improve sql efficiency for getting the run [\#1835](https://github.com/kubeflow/pipelines/pull/1835) ([IronPan](https://github.com/IronPan))
+- Adding a sample for serving component [\#1830](https://github.com/kubeflow/pipelines/pull/1830) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- Update for sample repo restructuring [\#1828](https://github.com/kubeflow/pipelines/pull/1828) ([zanedurante](https://github.com/zanedurante))
+- Fix run duration bug [\#1827](https://github.com/kubeflow/pipelines/pull/1827) ([rileyjbauer](https://github.com/rileyjbauer))
+- Reduce API usage by utilizing reference name in reference resource API [\#1824](https://github.com/kubeflow/pipelines/pull/1824) ([ajchili](https://github.com/ajchili))
+- Update npm test to not use coverall [\#1819](https://github.com/kubeflow/pipelines/pull/1819) ([IronPan](https://github.com/IronPan))
+- Add subprocess pip install example in lightweight component example notebook [\#1817](https://github.com/kubeflow/pipelines/pull/1817) ([Bobgy](https://github.com/Bobgy))
+- Build - Fix CloudBuild bug [\#1816](https://github.com/kubeflow/pipelines/pull/1816) ([Ark-kun](https://github.com/Ark-kun))
+- Refactors toolbar buttons to use a map rather than an array [\#1812](https://github.com/kubeflow/pipelines/pull/1812) ([rileyjbauer](https://github.com/rileyjbauer))
+- Disable flaky tests temporarily [\#1809](https://github.com/kubeflow/pipelines/pull/1809) ([Bobgy](https://github.com/Bobgy))
+- Fix test loophole for loading samples during KFP startup [\#1807](https://github.com/kubeflow/pipelines/pull/1807) ([IronPan](https://github.com/IronPan))
+- Container builder default gcr [\#1806](https://github.com/kubeflow/pipelines/pull/1806) ([gaoning777](https://github.com/gaoning777))
+- Fix the broken sample path in API [\#1805](https://github.com/kubeflow/pipelines/pull/1805) ([IronPan](https://github.com/IronPan))
+- Garbage collect the completed workflow after persisted to database [\#1802](https://github.com/kubeflow/pipelines/pull/1802) ([IronPan](https://github.com/IronPan))
+- Fix github security alert. [\#1798](https://github.com/kubeflow/pipelines/pull/1798) ([hongye-sun](https://github.com/hongye-sun))
+- ContainerBuilder loading kube config [\#1795](https://github.com/kubeflow/pipelines/pull/1795) ([gaoning777](https://github.com/gaoning777))
+- Move TF installation to notebooks [\#1793](https://github.com/kubeflow/pipelines/pull/1793) ([numerology](https://github.com/numerology))
+- Move argo installation to dockerfile from bash script. [\#1792](https://github.com/kubeflow/pipelines/pull/1792) ([numerology](https://github.com/numerology))
+- fix sample reference link [\#1789](https://github.com/kubeflow/pipelines/pull/1789) ([gaoning777](https://github.com/gaoning777))
+- skip storing log to files [\#1788](https://github.com/kubeflow/pipelines/pull/1788) ([IronPan](https://github.com/IronPan))
+- Remove yebrahim from approvers/reviewers [\#1787](https://github.com/kubeflow/pipelines/pull/1787) ([yebrahim](https://github.com/yebrahim))
+- update owner files in samples and test [\#1785](https://github.com/kubeflow/pipelines/pull/1785) ([gaoning777](https://github.com/gaoning777))
+- Fixed Dockerfile used for buildVisualizationServer in Cloud Build [\#1783](https://github.com/kubeflow/pipelines/pull/1783) ([ajchili](https://github.com/ajchili))
+- Add retry button in Pipeline UI [\#1782](https://github.com/kubeflow/pipelines/pull/1782) ([IronPan](https://github.com/IronPan))
+- add reference name to resource reference API proto [\#1781](https://github.com/kubeflow/pipelines/pull/1781) ([IronPan](https://github.com/IronPan))
+- Update images, bug fixes, clean up code [\#1778](https://github.com/kubeflow/pipelines/pull/1778) ([carolynwang](https://github.com/carolynwang))
+- Container builder [\#1774](https://github.com/kubeflow/pipelines/pull/1774) ([gaoning777](https://github.com/gaoning777))
+- fix api server sort test [\#1769](https://github.com/kubeflow/pipelines/pull/1769) ([IronPan](https://github.com/IronPan))
+- SDK - Containers - Returning image name with digest [\#1768](https://github.com/kubeflow/pipelines/pull/1768) ([Ark-kun](https://github.com/Ark-kun))
+- Move imagepullsecrets sample to samples/core [\#1767](https://github.com/kubeflow/pipelines/pull/1767) ([numerology](https://github.com/numerology))
+- Not return error if run update doesn't change DB entry [\#1765](https://github.com/kubeflow/pipelines/pull/1765) ([IronPan](https://github.com/IronPan))
+- remove copying the samples since we are not releasing the samples in the GCS [\#1764](https://github.com/kubeflow/pipelines/pull/1764) ([gaoning777](https://github.com/gaoning777))
+- Backend - Docker build should fail on sample compilation failures [\#1760](https://github.com/kubeflow/pipelines/pull/1760) ([Ark-kun](https://github.com/Ark-kun))
+- Move samples to the correct location [\#1759](https://github.com/kubeflow/pipelines/pull/1759) ([gaoning777](https://github.com/gaoning777))
+- Change how Variables are Provided to Visualizations [\#1754](https://github.com/kubeflow/pipelines/pull/1754) ([ajchili](https://github.com/ajchili))
+- Add preemptible gpu sample [\#1749](https://github.com/kubeflow/pipelines/pull/1749) ([numerology](https://github.com/numerology))
+- Revert "Backend - Updated the version of the ml metadata package" [\#1747](https://github.com/kubeflow/pipelines/pull/1747) ([Ark-kun](https://github.com/Ark-kun))
+- Revert "Backend - Starting the api-server container build from scratch" [\#1742](https://github.com/kubeflow/pipelines/pull/1742) ([Ark-kun](https://github.com/Ark-kun))
+- Refactor aws samples to match new folder structure [\#1741](https://github.com/kubeflow/pipelines/pull/1741) ([carolynwang](https://github.com/carolynwang))
+- Components - Added the pymongo license URL [\#1740](https://github.com/kubeflow/pipelines/pull/1740) ([Ark-kun](https://github.com/Ark-kun))
+- Add Visualization Server to Cloud Build yaml Files [\#1738](https://github.com/kubeflow/pipelines/pull/1738) ([ajchili](https://github.com/ajchili))
+- Update Watson Machine Learning auth with IAM [\#1737](https://github.com/kubeflow/pipelines/pull/1737) ([Tomcli](https://github.com/Tomcli))
+- Delete KFP component before reinstalling again [\#1736](https://github.com/kubeflow/pipelines/pull/1736) ([IronPan](https://github.com/IronPan))
+- Moving samples to match the new folder structure [\#1734](https://github.com/kubeflow/pipelines/pull/1734) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- fix cloudbuild failure [\#1733](https://github.com/kubeflow/pipelines/pull/1733) ([gaoning777](https://github.com/gaoning777))
+- Refactor sample tests configuration to reduce the efforts of adding samples. [\#1730](https://github.com/kubeflow/pipelines/pull/1730) ([numerology](https://github.com/numerology))
+- SDK - Lightweight - Fixed regression for components without outputs [\#1726](https://github.com/kubeflow/pipelines/pull/1726) ([Ark-kun](https://github.com/Ark-kun))
+- Backend - Updated the version of the ml metadata package [\#1725](https://github.com/kubeflow/pipelines/pull/1725) ([Ark-kun](https://github.com/Ark-kun))
+- Add API to rerun the pipeline [\#1720](https://github.com/kubeflow/pipelines/pull/1720) ([IronPan](https://github.com/IronPan))
+- Remove outdated argo install instruction [\#1719](https://github.com/kubeflow/pipelines/pull/1719) ([Bobgy](https://github.com/Bobgy))
+- SDK - ContainerOp.set\_display\_name should return self to enable chaining [\#1718](https://github.com/kubeflow/pipelines/pull/1718) ([Ark-kun](https://github.com/Ark-kun))
+- Rename InputPath -\> Source for Visualization API definition [\#1717](https://github.com/kubeflow/pipelines/pull/1717) ([ajchili](https://github.com/ajchili))
+- Add SageMaker create workteam and Ground Truth components, sample demo pipeline, other minor updates [\#1716](https://github.com/kubeflow/pipelines/pull/1716) ([carolynwang](https://github.com/carolynwang))
+- Support Single part PutFile [\#1713](https://github.com/kubeflow/pipelines/pull/1713) ([nirsagi](https://github.com/nirsagi))
+- Fixes cloning of recurring runs [\#1712](https://github.com/kubeflow/pipelines/pull/1712) ([rileyjbauer](https://github.com/rileyjbauer))
+- Restructure samples [\#1710](https://github.com/kubeflow/pipelines/pull/1710) ([gaoning777](https://github.com/gaoning777))
+- Simplify sample\_test.yaml [\#1709](https://github.com/kubeflow/pipelines/pull/1709) ([numerology](https://github.com/numerology))
+- add jxzheng to the reviewers for samples [\#1705](https://github.com/kubeflow/pipelines/pull/1705) ([gaoning777](https://github.com/gaoning777))
+- Component build fix [\#1703](https://github.com/kubeflow/pipelines/pull/1703) ([gaoning777](https://github.com/gaoning777))
+- Allows creation of jobs without experiments [\#1702](https://github.com/kubeflow/pipelines/pull/1702) ([rileyjbauer](https://github.com/rileyjbauer))
+- Backend - Starting the api-server container build from scratch [\#1699](https://github.com/kubeflow/pipelines/pull/1699) ([Ark-kun](https://github.com/Ark-kun))
+- Moving component\_sdk to components/gcp/ [\#1698](https://github.com/kubeflow/pipelines/pull/1698) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Lightweight - Added support for complex default values [\#1696](https://github.com/kubeflow/pipelines/pull/1696) ([Ark-kun](https://github.com/Ark-kun))
+- Changelog 0.1.25 [\#1695](https://github.com/kubeflow/pipelines/pull/1695) ([jingzhang36](https://github.com/jingzhang36))
+- Move kustomize manifests a dedicate directory [\#1690](https://github.com/kubeflow/pipelines/pull/1690) ([IronPan](https://github.com/IronPan))
+- Clears the workflow's name in GetWorkflowSpec and uses it for the GenerateName [\#1689](https://github.com/kubeflow/pipelines/pull/1689) ([rileyjbauer](https://github.com/rileyjbauer))
+- API - Updated swagger-codegen-cli version [\#1686](https://github.com/kubeflow/pipelines/pull/1686) ([Ark-kun](https://github.com/Ark-kun))
+- Update SageMaker components and sample pipeline [\#1682](https://github.com/kubeflow/pipelines/pull/1682) ([carolynwang](https://github.com/carolynwang))
+- Basic component build sample [\#1681](https://github.com/kubeflow/pipelines/pull/1681) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- Separate codegen from containerbuild 2 [\#1680](https://github.com/kubeflow/pipelines/pull/1680) ([gaoning777](https://github.com/gaoning777))
+- Separate codegen from containerbuild [\#1679](https://github.com/kubeflow/pipelines/pull/1679) ([gaoning777](https://github.com/gaoning777))
+- Add new PlotType to Allow for Visualization Creation [\#1677](https://github.com/kubeflow/pipelines/pull/1677) ([ajchili](https://github.com/ajchili))
+- Container op mount secret sample [\#1676](https://github.com/kubeflow/pipelines/pull/1676) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- SDK/Lightweight - Updated default image to tensorflow:1.13.2-py3 [\#1671](https://github.com/kubeflow/pipelines/pull/1671) ([Ark-kun](https://github.com/Ark-kun))
+- Adding a sample for explicitly defining the execution order [\#1668](https://github.com/kubeflow/pipelines/pull/1668) ([SinaChavoshi](https://github.com/SinaChavoshi))
+- Adding multiple outputs into sdk with sample [\#1667](https://github.com/kubeflow/pipelines/pull/1667) ([zanedurante](https://github.com/zanedurante))
+- SDK - Removed the build\_image parameter from build\_python\_component function [\#1657](https://github.com/kubeflow/pipelines/pull/1657) ([Ark-kun](https://github.com/Ark-kun))
+- update kaniko executor version to speed up image build [\#1652](https://github.com/kubeflow/pipelines/pull/1652) ([gaoning777](https://github.com/gaoning777))
+- Add code for python visualization service [\#1651](https://github.com/kubeflow/pipelines/pull/1651) ([ajchili](https://github.com/ajchili))
+- SDK/Client - Added the create\_run\_from\_pipeline\_package method [\#1523](https://github.com/kubeflow/pipelines/pull/1523) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Using Airflow ops in Pipelines [\#1483](https://github.com/kubeflow/pipelines/pull/1483) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Cleanup - Serialized PipelineParamTuple does not need value or type [\#1469](https://github.com/kubeflow/pipelines/pull/1469) ([Ark-kun](https://github.com/Ark-kun))
+- Reorganize ResourceOp samples [\#1433](https://github.com/kubeflow/pipelines/pull/1433) ([elikatsis](https://github.com/elikatsis))
+- add default value type checking [\#1407](https://github.com/kubeflow/pipelines/pull/1407) ([gaoning777](https://github.com/gaoning777))
+- Seldon examples [\#1405](https://github.com/kubeflow/pipelines/pull/1405) ([ryandawsonuk](https://github.com/ryandawsonuk))
+
+## [0.1.25](https://github.com/kubeflow/pipelines/tree/0.1.25) (2019-07-26)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.24...0.1.25)
+
+**Merged pull requests:**
+
+- Increase version in namespaced-install.yaml too [\#1684](https://github.com/kubeflow/pipelines/pull/1684) ([jingzhang36](https://github.com/jingzhang36))
+- Add visualization API service to frontend [\#1675](https://github.com/kubeflow/pipelines/pull/1675) ([ajchili](https://github.com/ajchili))
+- Increase KFP version in \(1\) sdk/python/setup.py \(2\) component\_sdk/python/setup.py \(3\) manifests/base/kustomization.yaml [\#1674](https://github.com/kubeflow/pipelines/pull/1674) ([jingzhang36](https://github.com/jingzhang36))
+- Release fe639f41661d8e17fcda64ff8242127620b80ba0 [\#1672](https://github.com/kubeflow/pipelines/pull/1672) ([jingzhang36](https://github.com/jingzhang36))
+- Adding kfp.cil to kfp setup.py. [\#1666](https://github.com/kubeflow/pipelines/pull/1666) ([hongye-sun](https://github.com/hongye-sun))
+- Fix exit handler sample [\#1665](https://github.com/kubeflow/pipelines/pull/1665) ([hongye-sun](https://github.com/hongye-sun))
+- Add visualization swagger files to frontend [\#1663](https://github.com/kubeflow/pipelines/pull/1663) ([ajchili](https://github.com/ajchili))
+- move gcshelper out of component\_builder [\#1658](https://github.com/kubeflow/pipelines/pull/1658) ([gaoning777](https://github.com/gaoning777))
+- Remove redundant import. [\#1656](https://github.com/kubeflow/pipelines/pull/1656) ([numerology](https://github.com/numerology))
+- Fix broken license link [\#1655](https://github.com/kubeflow/pipelines/pull/1655) ([hongye-sun](https://github.com/hongye-sun))
+- update kaniko executor version to speed up image build [\#1652](https://github.com/kubeflow/pipelines/pull/1652) ([gaoning777](https://github.com/gaoning777))
+- add init container for container op [\#1650](https://github.com/kubeflow/pipelines/pull/1650) ([IronPan](https://github.com/IronPan))
+- update python test to use python3 [\#1649](https://github.com/kubeflow/pipelines/pull/1649) ([IronPan](https://github.com/IronPan))
+- Add visualization server and unit tests for visualization server [\#1647](https://github.com/kubeflow/pipelines/pull/1647) ([ajchili](https://github.com/ajchili))
+- Sets min widths for buttons to avoid text wrapping [\#1637](https://github.com/kubeflow/pipelines/pull/1637) ([rileyjbauer](https://github.com/rileyjbauer))
+- Changed arguments parameter type and built visualization pb and swagger files [\#1636](https://github.com/kubeflow/pipelines/pull/1636) ([ajchili](https://github.com/ajchili))
+- Add recurring run column to run lists [\#1635](https://github.com/kubeflow/pipelines/pull/1635) ([rileyjbauer](https://github.com/rileyjbauer))
+- update persistent agent to only store the argo spec [\#1634](https://github.com/kubeflow/pipelines/pull/1634) ([IronPan](https://github.com/IronPan))
+- Add OWNERS file in aws components and examples [\#1633](https://github.com/kubeflow/pipelines/pull/1633) ([Jeffwan](https://github.com/Jeffwan))
+- Samples - Cleaned up unnecessary usage of PipelineParam [\#1631](https://github.com/kubeflow/pipelines/pull/1631) ([Ark-kun](https://github.com/Ark-kun))
+- Samples - Removed the immediate\_value sample [\#1630](https://github.com/kubeflow/pipelines/pull/1630) ([Ark-kun](https://github.com/Ark-kun))
+- Add SageMaker HPO component and sample usage in a pipeline [\#1628](https://github.com/kubeflow/pipelines/pull/1628) ([carolynwang](https://github.com/carolynwang))
+- propagate database password when init db connection [\#1627](https://github.com/kubeflow/pipelines/pull/1627) ([IronPan](https://github.com/IronPan))
+- SDK - Lightweight - Added support for "None" default values [\#1626](https://github.com/kubeflow/pipelines/pull/1626) ([Ark-kun](https://github.com/Ark-kun))
+- Removed duplicate CircularProgress within UI [\#1625](https://github.com/kubeflow/pipelines/pull/1625) ([ajchili](https://github.com/ajchili))
+- viewer controller is now namespaced so no need for cluster role [\#1623](https://github.com/kubeflow/pipelines/pull/1623) ([jingzhang36](https://github.com/jingzhang36))
+- switch third party images to GCR [\#1622](https://github.com/kubeflow/pipelines/pull/1622) ([IronPan](https://github.com/IronPan))
+- update changelog [\#1618](https://github.com/kubeflow/pipelines/pull/1618) ([gaoning777](https://github.com/gaoning777))
+- Update samples to use the latest versions of the python SDK [\#1607](https://github.com/kubeflow/pipelines/pull/1607) ([gaoning777](https://github.com/gaoning777))
+- Trims whitespace from pipeline params on creation of new run [\#1605](https://github.com/kubeflow/pipelines/pull/1605) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK/DSL: Fix bug when specifying custom resource for VolumeOp [\#1595](https://github.com/kubeflow/pipelines/pull/1595) ([elikatsis](https://github.com/elikatsis))
+- \[Feature\] Set ttlSecondsAfterFinished in argo workflow with PipelineConf [\#1594](https://github.com/kubeflow/pipelines/pull/1594) ([eterna2](https://github.com/eterna2))
+- Delete go CLI [\#1592](https://github.com/kubeflow/pipelines/pull/1592) ([IronPan](https://github.com/IronPan))
+- Added image captioning sample to samples/notebooks [\#1591](https://github.com/kubeflow/pipelines/pull/1591) ([zanedurante](https://github.com/zanedurante))
+- tox.ini: Add flake8 tests to find Python syntax errors and undefined names [\#1577](https://github.com/kubeflow/pipelines/pull/1577) ([cclauss](https://github.com/cclauss))
+- Add gcp-connector label and remove pod name [\#1542](https://github.com/kubeflow/pipelines/pull/1542) ([hongye-sun](https://github.com/hongye-sun))
+- Remove legacy hardcoded version of TFX [\#1502](https://github.com/kubeflow/pipelines/pull/1502) ([IreneGi](https://github.com/IreneGi))
+- add support for flexible config \(via env var\) for the pipeline service and UI, fix broken links \(pointed to API vs UI service\) [\#1293](https://github.com/kubeflow/pipelines/pull/1293) ([yaronha](https://github.com/yaronha))
+
+## [0.1.24](https://github.com/kubeflow/pipelines/tree/0.1.24) (2019-07-12)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.23...0.1.24)
+
+**Merged pull requests:**
+
+- update manifest and sdk version [\#1617](https://github.com/kubeflow/pipelines/pull/1617) ([gaoning777](https://github.com/gaoning777))
+- fix dependency bug in the recursion support [\#1616](https://github.com/kubeflow/pipelines/pull/1616) ([gaoning777](https://github.com/gaoning777))
+- Release ac833a084b32324b56ca56e9109e05cde02816a4 [\#1614](https://github.com/kubeflow/pipelines/pull/1614) ([gaoning777](https://github.com/gaoning777))
+- Set user credentials when creating viewer object [\#1603](https://github.com/kubeflow/pipelines/pull/1603) ([jingzhang36](https://github.com/jingzhang36))
+- List pipelines [\#1588](https://github.com/kubeflow/pipelines/pull/1588) ([kevinbache](https://github.com/kevinbache))
+- Update Watson training component to output model location UID [\#1587](https://github.com/kubeflow/pipelines/pull/1587) ([Tomcli](https://github.com/Tomcli))
+- fix recursion bug [\#1583](https://github.com/kubeflow/pipelines/pull/1583) ([gaoning777](https://github.com/gaoning777))
+- Release 2d0d8619507743ec4ff1e213735a8f82d3913281 [\#1581](https://github.com/kubeflow/pipelines/pull/1581) ([IronPan](https://github.com/IronPan))
+- Pass dataset\_location to container [\#1579](https://github.com/kubeflow/pipelines/pull/1579) ([hongye-sun](https://github.com/hongye-sun))
+- Add PV/PVC admin permission for pipeline runner [\#1576](https://github.com/kubeflow/pipelines/pull/1576) ([IronPan](https://github.com/IronPan))
+- Updates the changelog for the v0.1.23 release [\#1571](https://github.com/kubeflow/pipelines/pull/1571) ([rileyjbauer](https://github.com/rileyjbauer))
+- Manifests: Update pipeline-runner permissions [\#1570](https://github.com/kubeflow/pipelines/pull/1570) ([elikatsis](https://github.com/elikatsis))
+- Update manifests for 0.1.23 [\#1567](https://github.com/kubeflow/pipelines/pull/1567) ([rileyjbauer](https://github.com/rileyjbauer))
+- Add Visualization API endpoint [\#1565](https://github.com/kubeflow/pipelines/pull/1565) ([ajchili](https://github.com/ajchili))
+- Viewer CRD controller running under namespace [\#1562](https://github.com/kubeflow/pipelines/pull/1562) ([jingzhang36](https://github.com/jingzhang36))
+- Pipeline sample: Updated research notebook to use Seaborn [\#1546](https://github.com/kubeflow/pipelines/pull/1546) ([kweinmeister](https://github.com/kweinmeister))
+- add\_pod\_env op handler [\#1540](https://github.com/kubeflow/pipelines/pull/1540) ([hongye-sun](https://github.com/hongye-sun))
+- Step 7 auto-generated change log for release 0.1.21 [\#1527](https://github.com/kubeflow/pipelines/pull/1527) ([jingzhang36](https://github.com/jingzhang36))
+
+## [0.1.23](https://github.com/kubeflow/pipelines/tree/0.1.23) (2019-06-26)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.22...0.1.23)
+
+**Merged pull requests:**
+
+- Sort keys in nested dictionaries for fixing unit tests [\#1558](https://github.com/kubeflow/pipelines/pull/1558) ([derekhh](https://github.com/derekhh))
+- Use sorted\(dict.items\(\)\) for stable output [\#1554](https://github.com/kubeflow/pipelines/pull/1554) ([derekhh](https://github.com/derekhh))
+- Use GCP external Stackdriver links [\#1552](https://github.com/kubeflow/pipelines/pull/1552) ([derekhh](https://github.com/derekhh))
+- configurable timeout and namespace in docker magic [\#1550](https://github.com/kubeflow/pipelines/pull/1550) ([gaoning777](https://github.com/gaoning777))
+- Updated links in READMEs [\#1544](https://github.com/kubeflow/pipelines/pull/1544) ([sarahmaddox](https://github.com/sarahmaddox))
+- SDK/Client - Add optional parameter "name" to upload\_pipeline method [\#1543](https://github.com/kubeflow/pipelines/pull/1543) ([fabito](https://github.com/fabito))
+- Parameterize namespace and suppress pod error [\#1539](https://github.com/kubeflow/pipelines/pull/1539) ([hongye-sun](https://github.com/hongye-sun))
+- SDK/Lightweight - Use argparse for command-line parsing [\#1534](https://github.com/kubeflow/pipelines/pull/1534) ([Ark-kun](https://github.com/Ark-kun))
+- Release v0.22 [\#1525](https://github.com/kubeflow/pipelines/pull/1525) ([kevinbache](https://github.com/kevinbache))
+- SDK/Lightweight - Added python version compatibility checks [\#1524](https://github.com/kubeflow/pipelines/pull/1524) ([Ark-kun](https://github.com/Ark-kun))
+- fix api doc link url [\#1496](https://github.com/kubeflow/pipelines/pull/1496) ([xieqihui](https://github.com/xieqihui))
+- add issue template [\#1492](https://github.com/kubeflow/pipelines/pull/1492) ([gaoning777](https://github.com/gaoning777))
+- SDK - Travis configuration for Python 3.5 and 3.7 [\#1467](https://github.com/kubeflow/pipelines/pull/1467) ([kvalev](https://github.com/kvalev))
+- Add timeout out in dsl [\#1465](https://github.com/kubeflow/pipelines/pull/1465) ([gaoning777](https://github.com/gaoning777))
+
+## [0.1.22](https://github.com/kubeflow/pipelines/tree/0.1.22) (2019-06-21)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.21...0.1.22)
+
+**Merged pull requests:**
+
+- increment sdk versions [\#1538](https://github.com/kubeflow/pipelines/pull/1538) ([hongye-sun](https://github.com/hongye-sun))
+- SDK/Client - Added support for all APIs [\#1536](https://github.com/kubeflow/pipelines/pull/1536) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Client - Added the upload\_pipeline API [\#1535](https://github.com/kubeflow/pipelines/pull/1535) ([Ark-kun](https://github.com/Ark-kun))
+- Update Watson pipeline component source to the latest commit [\#1533](https://github.com/kubeflow/pipelines/pull/1533) ([Tomcli](https://github.com/Tomcli))
+- SDK - Generated paths will be in /tmp by default [\#1531](https://github.com/kubeflow/pipelines/pull/1531) ([Ark-kun](https://github.com/Ark-kun))
+- Add metrics table to run details page, run outputs tab [\#1530](https://github.com/kubeflow/pipelines/pull/1530) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK+Frontend - Fixed the task display name annotation key [\#1526](https://github.com/kubeflow/pipelines/pull/1526) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Fixed import [\#1522](https://github.com/kubeflow/pipelines/pull/1522) ([Ark-kun](https://github.com/Ark-kun))
+- Sample: model retraining scenario using AI Platform components [\#1513](https://github.com/kubeflow/pipelines/pull/1513) ([kweinmeister](https://github.com/kweinmeister))
+- SDK/Lightweight - Disabled code pickling by default [\#1512](https://github.com/kubeflow/pipelines/pull/1512) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Lightweight - Enable cloudpickle installation from non-root users [\#1511](https://github.com/kubeflow/pipelines/pull/1511) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Improving python component logs by making stdout and stderr unbuffered [\#1510](https://github.com/kubeflow/pipelines/pull/1510) ([Ark-kun](https://github.com/Ark-kun))
+- Release 1d55a27cf8b69696f3ab5c10687edf2fde0068c7 [\#1506](https://github.com/kubeflow/pipelines/pull/1506) ([hongye-sun](https://github.com/hongye-sun))
+- Add wait\_job command [\#1505](https://github.com/kubeflow/pipelines/pull/1505) ([hongye-sun](https://github.com/hongye-sun))
+- Add myself as an approver/reviewer. [\#1503](https://github.com/kubeflow/pipelines/pull/1503) ([neuromage](https://github.com/neuromage))
+- Update README.md [\#1500](https://github.com/kubeflow/pipelines/pull/1500) ([krazyhaas](https://github.com/krazyhaas))
+- use cluster role for viewer crd for now since viewer crd doesn't support namespaced deployment [\#1499](https://github.com/kubeflow/pipelines/pull/1499) ([IronPan](https://github.com/IronPan))
+- SDK - Make it easier to compile and submit a pipeline run [\#1484](https://github.com/kubeflow/pipelines/pull/1484) ([Ark-kun](https://github.com/Ark-kun))
+- Fixed broken link in README.md [\#1480](https://github.com/kubeflow/pipelines/pull/1480) ([ajchili](https://github.com/ajchili))
+- Base64 encode the pickled code [\#1476](https://github.com/kubeflow/pipelines/pull/1476) ([kvalev](https://github.com/kvalev))
+- removing some creds [\#1470](https://github.com/kubeflow/pipelines/pull/1470) ([animeshsingh](https://github.com/animeshsingh))
+- SDK/Compiler - Invoke the op\_transformers as early as possible [\#1464](https://github.com/kubeflow/pipelines/pull/1464) ([kvalev](https://github.com/kvalev))
+- Frontend - Show customized task display names [\#1463](https://github.com/kubeflow/pipelines/pull/1463) ([Ark-kun](https://github.com/Ark-kun))
+- Output default job\_dir.txt file [\#1459](https://github.com/kubeflow/pipelines/pull/1459) ([hongye-sun](https://github.com/hongye-sun))
+- Add op\_to\_templates\_handler to compiler [\#1458](https://github.com/kubeflow/pipelines/pull/1458) ([hongye-sun](https://github.com/hongye-sun))
+- Pin selenium chrome version [\#1457](https://github.com/kubeflow/pipelines/pull/1457) ([rileyjbauer](https://github.com/rileyjbauer))
+- Apiserver s3 and MySQL env vars [\#1455](https://github.com/kubeflow/pipelines/pull/1455) ([yaronha](https://github.com/yaronha))
+- update manifest/ readme [\#1454](https://github.com/kubeflow/pipelines/pull/1454) ([IronPan](https://github.com/IronPan))
+- SDK/Compiler - Fix s3 artifact key names [\#1451](https://github.com/kubeflow/pipelines/pull/1451) ([kvalev](https://github.com/kvalev))
+- KFP CLI [\#1449](https://github.com/kubeflow/pipelines/pull/1449) ([hongye-sun](https://github.com/hongye-sun))
+- Add rileyjbauer as to frontend integration tests OWNERS [\#1447](https://github.com/kubeflow/pipelines/pull/1447) ([yebrahim](https://github.com/yebrahim))
+- update namespaced install manifest to include inverse proxy [\#1446](https://github.com/kubeflow/pipelines/pull/1446) ([IronPan](https://github.com/IronPan))
+- Added license URL for google-cloud-datastore [\#1445](https://github.com/kubeflow/pipelines/pull/1445) ([Ark-kun](https://github.com/Ark-kun))
+- Build - Simplify python SDK package installation [\#1444](https://github.com/kubeflow/pipelines/pull/1444) ([Ark-kun](https://github.com/Ark-kun))
+- Load auth from kube config. [\#1443](https://github.com/kubeflow/pipelines/pull/1443) ([hongye-sun](https://github.com/hongye-sun))
+- Fix Katib-launcher component with python file open issue [\#1441](https://github.com/kubeflow/pipelines/pull/1441) ([Tomcli](https://github.com/Tomcli))
+- expose add\_op\_transformer in the PipelineConf and add an example [\#1440](https://github.com/kubeflow/pipelines/pull/1440) ([gaoning777](https://github.com/gaoning777))
+- remove support for deserializing a string of a dict for typemeta [\#1439](https://github.com/kubeflow/pipelines/pull/1439) ([gaoning777](https://github.com/gaoning777))
+- SDK - Controlling which modules are captured with Lightweight components [\#1435](https://github.com/kubeflow/pipelines/pull/1435) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Only install cloudpickle if it's not available [\#1434](https://github.com/kubeflow/pipelines/pull/1434) ([Ark-kun](https://github.com/Ark-kun))
+- Update the doc to enable proxy agent by default [\#1432](https://github.com/kubeflow/pipelines/pull/1432) ([IronPan](https://github.com/IronPan))
+- enable proxy by default [\#1431](https://github.com/kubeflow/pipelines/pull/1431) ([IronPan](https://github.com/IronPan))
+- SDK - Dynamically installing cloudpickle module [\#1429](https://github.com/kubeflow/pipelines/pull/1429) ([Ark-kun](https://github.com/Ark-kun))
+- Backend - Updated Argo package from v2.3.0 RC to final version [\#1428](https://github.com/kubeflow/pipelines/pull/1428) ([Ark-kun](https://github.com/Ark-kun))
+- Testing - Updated Argo CLI to v2.3.0 [\#1427](https://github.com/kubeflow/pipelines/pull/1427) ([Ark-kun](https://github.com/Ark-kun))
+- Manifests: Update Argo version to v2.3.0 [\#1425](https://github.com/kubeflow/pipelines/pull/1425) ([elikatsis](https://github.com/elikatsis))
+- Update pipeline version in KFP manifest to 0.1.21 https://github.com/kubeflow/pipelines/releases/tag/0.1.21 [\#1424](https://github.com/kubeflow/pipelines/pull/1424) ([jingzhang36](https://github.com/jingzhang36))
+- Updates Google Cloud Storage and pins axios version [\#1415](https://github.com/kubeflow/pipelines/pull/1415) ([rileyjbauer](https://github.com/rileyjbauer))
+- update the API doc description [\#1410](https://github.com/kubeflow/pipelines/pull/1410) ([IronPan](https://github.com/IronPan))
+- add single API swagger json file [\#1408](https://github.com/kubeflow/pipelines/pull/1408) ([IronPan](https://github.com/IronPan))
+- add default value type checking [\#1407](https://github.com/kubeflow/pipelines/pull/1407) ([gaoning777](https://github.com/gaoning777))
+- Add doc for API [\#1406](https://github.com/kubeflow/pipelines/pull/1406) ([IronPan](https://github.com/IronPan))
+- Fix API package names and regenerate checked-in proto files. [\#1404](https://github.com/kubeflow/pipelines/pull/1404) ([neuromage](https://github.com/neuromage))
+- update DSL client documentation [\#1403](https://github.com/kubeflow/pipelines/pull/1403) ([IronPan](https://github.com/IronPan))
+- SDK/DSL: Make 'name' argument of a PipelineVolume omittable [\#1402](https://github.com/kubeflow/pipelines/pull/1402) ([elikatsis](https://github.com/elikatsis))
+- Fix auto generated resource link for DSL [\#1400](https://github.com/kubeflow/pipelines/pull/1400) ([IronPan](https://github.com/IronPan))
+- set dataset\_location as the default location. [\#1399](https://github.com/kubeflow/pipelines/pull/1399) ([hongye-sun](https://github.com/hongye-sun))
+- Added the link to the SDK reference docs to README.md [\#1398](https://github.com/kubeflow/pipelines/pull/1398) ([Ark-kun](https://github.com/Ark-kun))
+- Fixed the handling of PipelineParam-based default values in extract\_metadata [\#1380](https://github.com/kubeflow/pipelines/pull/1380) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Use different volume names for different secrets [\#1379](https://github.com/kubeflow/pipelines/pull/1379) ([Ark-kun](https://github.com/Ark-kun))
+- Add HyperParameters back to SageMaker training job [\#1377](https://github.com/kubeflow/pipelines/pull/1377) ([Jeffwan](https://github.com/Jeffwan))
+- SDK/Compiler: Add add\_pvolumes\(\) method to ContainerOp [\#1353](https://github.com/kubeflow/pipelines/pull/1353) ([elikatsis](https://github.com/elikatsis))
+- SDK - Refactored \_func\_to\_component\_spec to split code generation from signature analysis [\#1334](https://github.com/kubeflow/pipelines/pull/1334) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Improved test script compatibility with editable package installation [\#1200](https://github.com/kubeflow/pipelines/pull/1200) ([Ark-kun](https://github.com/Ark-kun))
+- Enhance pipeline TFX taxi sample to support on-prem cluster [\#749](https://github.com/kubeflow/pipelines/pull/749) ([jinchihe](https://github.com/jinchihe))
+
+## [0.1.21](https://github.com/kubeflow/pipelines/tree/0.1.21) (2019-05-29)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.20...0.1.21)
+
+**Merged pull requests:**
+
+- Cleanup code - Kaniko pod parameterization [\#1394](https://github.com/kubeflow/pipelines/pull/1394) ([IronPan](https://github.com/IronPan))
+- expose configuration for setting the max number of tensorboard [\#1393](https://github.com/kubeflow/pipelines/pull/1393) ([IronPan](https://github.com/IronPan))
+- Fix naming from sagamaker to sagemaker [\#1386](https://github.com/kubeflow/pipelines/pull/1386) ([tiffanyfay](https://github.com/tiffanyfay))
+- typo [\#1385](https://github.com/kubeflow/pipelines/pull/1385) ([gaoning777](https://github.com/gaoning777))
+- SDK - Capturing function dependencies when creating lightweight components [\#1372](https://github.com/kubeflow/pipelines/pull/1372) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Added the argo-models to requirements.txt [\#1367](https://github.com/kubeflow/pipelines/pull/1367) ([Ark-kun](https://github.com/Ark-kun))
+- Do not render primitives as json [\#1366](https://github.com/kubeflow/pipelines/pull/1366) ([rileyjbauer](https://github.com/rileyjbauer))
+- Update CHANGELOG.md [\#1358](https://github.com/kubeflow/pipelines/pull/1358) ([IronPan](https://github.com/IronPan))
+- Fix docstring for \_ops\_group Condition [\#1356](https://github.com/kubeflow/pipelines/pull/1356) ([Reldan](https://github.com/Reldan))
+- update kfp ui rbac permission [\#1350](https://github.com/kubeflow/pipelines/pull/1350) ([IronPan](https://github.com/IronPan))
+- Trim the file extension from suggested pipeline names [\#1349](https://github.com/kubeflow/pipelines/pull/1349) ([rileyjbauer](https://github.com/rileyjbauer))
+- Pretty print inputs and outputs json [\#1348](https://github.com/kubeflow/pipelines/pull/1348) ([rileyjbauer](https://github.com/rileyjbauer))
+- Release components [\#1347](https://github.com/kubeflow/pipelines/pull/1347) ([gaoning777](https://github.com/gaoning777))
+- SDK/Compiler: Fix Ops after\(\) method to handle multiple arguments [\#1346](https://github.com/kubeflow/pipelines/pull/1346) ([elikatsis](https://github.com/elikatsis))
+- rename sample\_test to component\_test and sample\_test\_v2 to sample\_test [\#1341](https://github.com/kubeflow/pipelines/pull/1341) ([gaoning777](https://github.com/gaoning777))
+- Add HTTP/HTTPS support in FE [\#1339](https://github.com/kubeflow/pipelines/pull/1339) ([eran-nussbaum](https://github.com/eran-nussbaum))
+- \[kfp sdk\] Added examples for ArtifactLocation, ResourceOp, VolumeOp, and Sidecar. [\#1338](https://github.com/kubeflow/pipelines/pull/1338) ([eterna2](https://github.com/eterna2))
+- check if data and env format [\#1337](https://github.com/kubeflow/pipelines/pull/1337) ([cheyang](https://github.com/cheyang))
+- update proxy image build path [\#1336](https://github.com/kubeflow/pipelines/pull/1336) ([IronPan](https://github.com/IronPan))
+- SDK - Made description and name parameters optional in the @pipeline decorator [\#1335](https://github.com/kubeflow/pipelines/pull/1335) ([Ark-kun](https://github.com/Ark-kun))
+- add argo install in postsubmit [\#1333](https://github.com/kubeflow/pipelines/pull/1333) ([gaoning777](https://github.com/gaoning777))
+- Removes redundant npm install from FE Dockerfile [\#1332](https://github.com/kubeflow/pipelines/pull/1332) ([rileyjbauer](https://github.com/rileyjbauer))
+- \[Frontend\] minio client in API server can be configured with environment variables [\#1324](https://github.com/kubeflow/pipelines/pull/1324) ([eterna2](https://github.com/eterna2))
+- Add mpi job into arena launcher [\#1307](https://github.com/kubeflow/pipelines/pull/1307) ([cheyang](https://github.com/cheyang))
+- Adds metrics table to Compare page, creates Metric component [\#1284](https://github.com/kubeflow/pipelines/pull/1284) ([rileyjbauer](https://github.com/rileyjbauer))
+- Fix watson train component [\#1259](https://github.com/kubeflow/pipelines/pull/1259) ([mpoqq](https://github.com/mpoqq))
+- Added the auto-generated python SDK docs [\#1256](https://github.com/kubeflow/pipelines/pull/1256) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Removing unneeded uses of dsl.Pipeline [\#1229](https://github.com/kubeflow/pipelines/pull/1229) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/DSL - ContainerOp.apply method now supports functions that do not return anything [\#1226](https://github.com/kubeflow/pipelines/pull/1226) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Compiler - Added the ability to apply a function to all ops in a pipeline [\#1209](https://github.com/kubeflow/pipelines/pull/1209) ([Ark-kun](https://github.com/Ark-kun))
+- \[Feature\] Supports parameterized S3Artifactory for Pipeline and ContainerOp in kfp package [\#1064](https://github.com/kubeflow/pipelines/pull/1064) ([eterna2](https://github.com/eterna2))
+
+## [0.1.20](https://github.com/kubeflow/pipelines/tree/0.1.20) (2019-05-14)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.19...0.1.20)
+
+**Closed issues:**
+
+- Cannot create job for experiment via Pipelines Go CLI [\#1321](https://github.com/kubeflow/pipelines/issues/1321)
+- Support a container flow inside one pod [\#1313](https://github.com/kubeflow/pipelines/issues/1313)
+- toleration support for ContainerOp [\#1265](https://github.com/kubeflow/pipelines/issues/1265)
+- Can only create recurring run from within experiment page [\#1217](https://github.com/kubeflow/pipelines/issues/1217)
+
+**Merged pull requests:**
+
+- Release c3235d725eb1d1eb06b5600a8291967aa6cf518f [\#1331](https://github.com/kubeflow/pipelines/pull/1331) ([IronPan](https://github.com/IronPan))
+- bump kfp version in sdk [\#1330](https://github.com/kubeflow/pipelines/pull/1330) ([IronPan](https://github.com/IronPan))
+- bump kfp version in component sdk [\#1329](https://github.com/kubeflow/pipelines/pull/1329) ([IronPan](https://github.com/IronPan))
+- switch the release script from staging to test [\#1328](https://github.com/kubeflow/pipelines/pull/1328) ([IronPan](https://github.com/IronPan))
+- Add proxy agent as optional kustomize component [\#1325](https://github.com/kubeflow/pipelines/pull/1325) ([IronPan](https://github.com/IronPan))
+- Clean up repo - delete unnecessary top level folders [\#1323](https://github.com/kubeflow/pipelines/pull/1323) ([IronPan](https://github.com/IronPan))
+- ml-pipeline-test has a cb job [\#1322](https://github.com/kubeflow/pipelines/pull/1322) ([gaoning777](https://github.com/gaoning777))
+- add patch for metric collection [\#1317](https://github.com/kubeflow/pipelines/pull/1317) ([gaoning777](https://github.com/gaoning777))
+- update tf version to 1.12.1 [\#1315](https://github.com/kubeflow/pipelines/pull/1315) ([gaoning777](https://github.com/gaoning777))
+- Fix typo in ContainerOp constructor's help string [\#1314](https://github.com/kubeflow/pipelines/pull/1314) ([elikatsis](https://github.com/elikatsis))
+- Adding myself to test owners [\#1312](https://github.com/kubeflow/pipelines/pull/1312) ([Ark-kun](https://github.com/Ark-kun))
+- Testing - Clean up the Argo controller that was used to build images [\#1311](https://github.com/kubeflow/pipelines/pull/1311) ([Ark-kun](https://github.com/Ark-kun))
+- Shows link to Stackdriver logs if logs retrieval fails and cluster is running in GKE [\#1310](https://github.com/kubeflow/pipelines/pull/1310) ([rileyjbauer](https://github.com/rileyjbauer))
+- expose namespace config for scheduled workflow [\#1309](https://github.com/kubeflow/pipelines/pull/1309) ([IronPan](https://github.com/IronPan))
+- expose namespace config for persistent agent [\#1308](https://github.com/kubeflow/pipelines/pull/1308) ([IronPan](https://github.com/IronPan))
+- Add dsl support for preemptible vm/gpus [\#1306](https://github.com/kubeflow/pipelines/pull/1306) ([gaoning777](https://github.com/gaoning777))
+- kustomize kf pipeline [\#1305](https://github.com/kubeflow/pipelines/pull/1305) ([IronPan](https://github.com/IronPan))
+- Update quickstart sample [\#1302](https://github.com/kubeflow/pipelines/pull/1302) ([gaoning777](https://github.com/gaoning777))
+- Fix PipelineParam pattern bug [\#1300](https://github.com/kubeflow/pipelines/pull/1300) ([elikatsis](https://github.com/elikatsis))
+- Add -p to mkdir in quickstart [\#1299](https://github.com/kubeflow/pipelines/pull/1299) ([rileyjbauer](https://github.com/rileyjbauer))
+- Remove cops and rops pipeline attributes [\#1298](https://github.com/kubeflow/pipelines/pull/1298) ([elikatsis](https://github.com/elikatsis))
+- SDK - Stopped hard-coding artifact storage configuration in the pipeline packages [\#1297](https://github.com/kubeflow/pipelines/pull/1297) ([Ark-kun](https://github.com/Ark-kun))
+- changelog for v0.1.19 [\#1296](https://github.com/kubeflow/pipelines/pull/1296) ([hongye-sun](https://github.com/hongye-sun))
+- add nuclio components \(to build/deploy, delete, invoke functions\) [\#1295](https://github.com/kubeflow/pipelines/pull/1295) ([yaronha](https://github.com/yaronha))
+- SDK - Failing faster in python\_op tests [\#1291](https://github.com/kubeflow/pipelines/pull/1291) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Renamed ModelBase.from\_struct/to\_struct to from\_dict/to\_dict [\#1290](https://github.com/kubeflow/pipelines/pull/1290) ([Ark-kun](https://github.com/Ark-kun))
+- Backend - Marking auto-added artifacts as optional [\#1289](https://github.com/kubeflow/pipelines/pull/1289) ([Ark-kun](https://github.com/Ark-kun))
+- Update new Watson OpenScale components and pipeline [\#1287](https://github.com/kubeflow/pipelines/pull/1287) ([Tomcli](https://github.com/Tomcli))
+- Add AWS EMR and Athena components [\#1286](https://github.com/kubeflow/pipelines/pull/1286) ([Jeffwan](https://github.com/Jeffwan))
+- Make confusion\_matrix and roc generic [\#1285](https://github.com/kubeflow/pipelines/pull/1285) ([Jeffwan](https://github.com/Jeffwan))
+- Components - Updating component versions in samples during release [\#1283](https://github.com/kubeflow/pipelines/pull/1283) ([Ark-kun](https://github.com/Ark-kun))
+- Sets the background color for KFP pages [\#1281](https://github.com/kubeflow/pipelines/pull/1281) ([rileyjbauer](https://github.com/rileyjbauer))
+- keep the api image name consistent between the presubmit test and staging [\#1279](https://github.com/kubeflow/pipelines/pull/1279) ([gaoning777](https://github.com/gaoning777))
+- Frontend - Add support for artifacts stored in S3 [\#1278](https://github.com/kubeflow/pipelines/pull/1278) ([Jeffwan](https://github.com/Jeffwan))
+- Release - Simplified python package building [\#1277](https://github.com/kubeflow/pipelines/pull/1277) ([Ark-kun](https://github.com/Ark-kun))
+- Add SageMaker components and example pipeline [\#1276](https://github.com/kubeflow/pipelines/pull/1276) ([Jeffwan](https://github.com/Jeffwan))
+- Tests/Travis - Simplified the Python SDK package installation [\#1275](https://github.com/kubeflow/pipelines/pull/1275) ([Ark-kun](https://github.com/Ark-kun))
+- Adds a toggle between one-off and recurring runs to NewRun page [\#1274](https://github.com/kubeflow/pipelines/pull/1274) ([rileyjbauer](https://github.com/rileyjbauer))
+- spark components [\#1272](https://github.com/kubeflow/pipelines/pull/1272) ([animeshsingh](https://github.com/animeshsingh))
+- support tolerations for ContainerOps [\#1269](https://github.com/kubeflow/pipelines/pull/1269) ([hamedhsn](https://github.com/hamedhsn))
+- make pending timeout customizable [\#1268](https://github.com/kubeflow/pipelines/pull/1268) ([cheyang](https://github.com/cheyang))
+- SDK/Client - Supporting pipeline packages with multiple files [\#1207](https://github.com/kubeflow/pipelines/pull/1207) ([Ark-kun](https://github.com/Ark-kun))
+- Retaining the component url, digest or tag when loading [\#1090](https://github.com/kubeflow/pipelines/pull/1090) ([Ark-kun](https://github.com/Ark-kun))
+- Allow to specify informers namespace in persistence agent [\#901](https://github.com/kubeflow/pipelines/pull/901) ([ywskycn](https://github.com/ywskycn))
+
+## [0.1.19](https://github.com/kubeflow/pipelines/tree/0.1.19) (2019-05-03)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.18...0.1.19)
+
+**Closed issues:**
+
+- Unfinished run duration shown as negative time [\#1236](https://github.com/kubeflow/pipelines/issues/1236)
+- Release the SDK in Pypi and automate the release process [\#1233](https://github.com/kubeflow/pipelines/issues/1233)
+- The post-submit test always fails [\#1228](https://github.com/kubeflow/pipelines/issues/1228)
+- docker magic command should print kaniko pod logs [\#924](https://github.com/kubeflow/pipelines/issues/924)
+- Kubeflow Pipeline conditionals should be able to handle multiple output parameters [\#679](https://github.com/kubeflow/pipelines/issues/679)
+
+**Merged pull requests:**
+
+- Update component yaml reference to eb830cd73ca148e5a1a6485a9374c2dc068314bc [\#1282](https://github.com/kubeflow/pipelines/pull/1282) ([hongye-sun](https://github.com/hongye-sun))
+- Component Release 727c48c690c081b505c1f0979d11930bf1ef07c0 [\#1280](https://github.com/kubeflow/pipelines/pull/1280) ([hongye-sun](https://github.com/hongye-sun))
+- update the image in the samples to use the new component images [\#1267](https://github.com/kubeflow/pipelines/pull/1267) ([gaoning777](https://github.com/gaoning777))
+- Pulls most functions out of Status and into StatusUtils [\#1262](https://github.com/kubeflow/pipelines/pull/1262) ([rileyjbauer](https://github.com/rileyjbauer))
+- Properly alternate sorting in PagedTable between ascending and descending [\#1261](https://github.com/kubeflow/pipelines/pull/1261) ([rileyjbauer](https://github.com/rileyjbauer))
+- Marking the UI-metadata and Metrics artifacts as optional [\#1260](https://github.com/kubeflow/pipelines/pull/1260) ([Ark-kun](https://github.com/Ark-kun))
+- Changelog for v0.1.18 [\#1258](https://github.com/kubeflow/pipelines/pull/1258) ([Ark-kun](https://github.com/Ark-kun))
+- Update arena SDK version [\#1251](https://github.com/kubeflow/pipelines/pull/1251) ([cheyang](https://github.com/cheyang))
+- remove unnecessary args [\#1249](https://github.com/kubeflow/pipelines/pull/1249) ([gaoning777](https://github.com/gaoning777))
+- fix postsubmit bugs [\#1248](https://github.com/kubeflow/pipelines/pull/1248) ([gaoning777](https://github.com/gaoning777))
+- display kaniko log if failed [\#1247](https://github.com/kubeflow/pipelines/pull/1247) ([gaoning777](https://github.com/gaoning777))
+- Fix Watson pipeline example [\#1246](https://github.com/kubeflow/pipelines/pull/1246) ([Tomcli](https://github.com/Tomcli))
+- fix the link for tfx taxi example [\#1245](https://github.com/kubeflow/pipelines/pull/1245) ([animeshsingh](https://github.com/animeshsingh))
+- updated ffdl notebook using component yaml file [\#1241](https://github.com/kubeflow/pipelines/pull/1241) ([animeshsingh](https://github.com/animeshsingh))
+- Fixing comp names [\#1239](https://github.com/kubeflow/pipelines/pull/1239) ([animeshsingh](https://github.com/animeshsingh))
+- SDK - Preventing errors when importing kfp.notebook [\#1215](https://github.com/kubeflow/pipelines/pull/1215) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Separated the generated api client package [\#1214](https://github.com/kubeflow/pipelines/pull/1214) ([Ark-kun](https://github.com/Ark-kun))
+- Clear default exp table on delete and create default exp on run create if none exists [\#1199](https://github.com/kubeflow/pipelines/pull/1199) ([rileyjbauer](https://github.com/rileyjbauer))
+- Automated the component image release script [\#1172](https://github.com/kubeflow/pipelines/pull/1172) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.18](https://github.com/kubeflow/pipelines/tree/0.1.18) (2019-04-26)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.17...0.1.18)
+
+**Closed issues:**
+
+- sample "DSL Static Type Checking" link to types needs to be updated [\#1220](https://github.com/kubeflow/pipelines/issues/1220)
+- Update the SDK version automatically during the release [\#1190](https://github.com/kubeflow/pipelines/issues/1190)
+- Bug: Unable to delete recurring run configs in UI [\#1181](https://github.com/kubeflow/pipelines/issues/1181)
+- Add integration test for filtering [\#1155](https://github.com/kubeflow/pipelines/issues/1155)
+- Pipeline submission fails when ContainerOp has numbers as command line arguments [\#1121](https://github.com/kubeflow/pipelines/issues/1121)
+- Add SDK support for sidecars [\#949](https://github.com/kubeflow/pipelines/issues/949)
+- Missing PullPolicy for ContainerOp [\#832](https://github.com/kubeflow/pipelines/issues/832)
+- Extend the DSL with support for Persistent Volumes and Snapshots [\#801](https://github.com/kubeflow/pipelines/issues/801)
+- Use GCP credentials for existing samples [\#418](https://github.com/kubeflow/pipelines/issues/418)
+- Does pipelines only support on GKE? And the workflow run with some problem. [\#365](https://github.com/kubeflow/pipelines/issues/365)
+- Remove experiment column from runs list inside experiment [\#222](https://github.com/kubeflow/pipelines/issues/222)
+- Support all container APIs through container op [\#204](https://github.com/kubeflow/pipelines/issues/204)
+
+**Merged pull requests:**
+
+- Release b0147bdbed9f25212408e0468a475289e80e0406 [\#1238](https://github.com/kubeflow/pipelines/pull/1238) ([Ark-kun](https://github.com/Ark-kun))
+- clean up quickstart sample a bit [\#1232](https://github.com/kubeflow/pipelines/pull/1232) ([gaoning777](https://github.com/gaoning777))
+- simplifying-ffdl-params [\#1230](https://github.com/kubeflow/pipelines/pull/1230) ([animeshsingh](https://github.com/animeshsingh))
+- modernize-wml-pipeline [\#1227](https://github.com/kubeflow/pipelines/pull/1227) ([animeshsingh](https://github.com/animeshsingh))
+- Swagger - Specifying content types on the route level [\#1225](https://github.com/kubeflow/pipelines/pull/1225) ([Ark-kun](https://github.com/Ark-kun))
+- update a broken link [\#1221](https://github.com/kubeflow/pipelines/pull/1221) ([gaoning777](https://github.com/gaoning777))
+- Update to version 3.0.2 of npm package 'extend' [\#1211](https://github.com/kubeflow/pipelines/pull/1211) ([rileyjbauer](https://github.com/rileyjbauer))
+- Testing - Fixed the postsubmit tests [\#1210](https://github.com/kubeflow/pipelines/pull/1210) ([Ark-kun](https://github.com/Ark-kun))
+- update types to defined core type names [\#1206](https://github.com/kubeflow/pipelines/pull/1206) ([gaoning777](https://github.com/gaoning777))
+- Moving the component 'verbs' ahead of 'product/project' [\#1202](https://github.com/kubeflow/pipelines/pull/1202) ([animeshsingh](https://github.com/animeshsingh))
+- Fix package version conflict [\#1201](https://github.com/kubeflow/pipelines/pull/1201) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Fixed the non-string items in the command-line arguments [\#1197](https://github.com/kubeflow/pipelines/pull/1197) ([Ark-kun](https://github.com/Ark-kun))
+- Testing/Sample - Made checking confusion matrix data more robust [\#1196](https://github.com/kubeflow/pipelines/pull/1196) ([Ark-kun](https://github.com/Ark-kun))
+- Updated vulnerable package [\#1193](https://github.com/kubeflow/pipelines/pull/1193) ([Ark-kun](https://github.com/Ark-kun))
+- Return error when ml-metadata serialized format does not match expected format. [\#1192](https://github.com/kubeflow/pipelines/pull/1192) ([neuromage](https://github.com/neuromage))
+- Expose step id and step name [\#1191](https://github.com/kubeflow/pipelines/pull/1191) ([cheyang](https://github.com/cheyang))
+- SDK - Made ComponentSpec.implementation field optional [\#1188](https://github.com/kubeflow/pipelines/pull/1188) ([Ark-kun](https://github.com/Ark-kun))
+- Make Python Client robust to existing IPython installations [\#1186](https://github.com/kubeflow/pipelines/pull/1186) ([JohnPaton](https://github.com/JohnPaton))
+- Fixes deletion of recurring runs [\#1185](https://github.com/kubeflow/pipelines/pull/1185) ([rileyjbauer](https://github.com/rileyjbauer))
+- update changelog [\#1184](https://github.com/kubeflow/pipelines/pull/1184) ([gaoning777](https://github.com/gaoning777))
+- Update arena component with git support [\#1179](https://github.com/kubeflow/pipelines/pull/1179) ([cheyang](https://github.com/cheyang))
+- SDK/Client - Stopped extracting pipeline file to disk during submission [\#1178](https://github.com/kubeflow/pipelines/pull/1178) ([Ark-kun](https://github.com/Ark-kun))
+- Marked all scripts as executable [\#1177](https://github.com/kubeflow/pipelines/pull/1177) ([Ark-kun](https://github.com/Ark-kun))
+- Allow creating runs without experiments [\#1175](https://github.com/kubeflow/pipelines/pull/1175) ([rileyjbauer](https://github.com/rileyjbauer))
+- adding myself as reviewer for samples [\#1174](https://github.com/kubeflow/pipelines/pull/1174) ([animeshsingh](https://github.com/animeshsingh))
+- SDK - Replaced insecure yaml.load with yaml.safe\_load [\#1170](https://github.com/kubeflow/pipelines/pull/1170) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Tests - Properly closing tar files opened for writing [\#1169](https://github.com/kubeflow/pipelines/pull/1169) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Decoupling ContainerOp from compiler [\#1168](https://github.com/kubeflow/pipelines/pull/1168) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Got rid of the global variable collecting all created pipelines [\#1167](https://github.com/kubeflow/pipelines/pull/1167) ([Ark-kun](https://github.com/Ark-kun))
+- Apply latest doc review changes to github docs [\#1128](https://github.com/kubeflow/pipelines/pull/1128) ([hongye-sun](https://github.com/hongye-sun))
+- SDK - Removed SourceSpec structure [\#1119](https://github.com/kubeflow/pipelines/pull/1119) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "XGBoost Trainer'" sample [\#1116](https://github.com/kubeflow/pipelines/pull/1116) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "TFX Taxi Cab Classification Pipeline" sample [\#1115](https://github.com/kubeflow/pipelines/pull/1115) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "Kubeflow training and classification" sample [\#1114](https://github.com/kubeflow/pipelines/pull/1114) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "Basic - Recursive loop" sample [\#1113](https://github.com/kubeflow/pipelines/pull/1113) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "Basic - Sequential execution" sample [\#1112](https://github.com/kubeflow/pipelines/pull/1112) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "Basic - Parallel execution" sample [\#1110](https://github.com/kubeflow/pipelines/pull/1110) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "Basic - Conditional" sample [\#1108](https://github.com/kubeflow/pipelines/pull/1108) ([Ark-kun](https://github.com/Ark-kun))
+- Parameterize the artifact path for mlpipeline ui-metadata and metrics [\#998](https://github.com/kubeflow/pipelines/pull/998) ([Tomcli](https://github.com/Tomcli))
+- Minor fix on samples/tfx-oss/README.md [\#969](https://github.com/kubeflow/pipelines/pull/969) ([ucdmkt](https://github.com/ucdmkt))
+- Extend the DSL to implement the design of \#801 [\#926](https://github.com/kubeflow/pipelines/pull/926) ([elikatsis](https://github.com/elikatsis))
+- Allow more flexible way to config the api server addr in persistence agent [\#867](https://github.com/kubeflow/pipelines/pull/867) ([ywskycn](https://github.com/ywskycn))
+- Kubeflow pipelines quickstart notebooks added. [\#821](https://github.com/kubeflow/pipelines/pull/821) ([rostam-github](https://github.com/rostam-github))
+
+## [0.1.17](https://github.com/kubeflow/pipelines/tree/0.1.17) (2019-04-17)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.16...0.1.17)
+
+**Closed issues:**
+
+- Runs leaked into archived page [\#1150](https://github.com/kubeflow/pipelines/issues/1150)
+- Pagination broken in UI [\#1149](https://github.com/kubeflow/pipelines/issues/1149)
+- The number of members in the policy \(1,503\) is larger than the maximum allowed size 1,500 [\#1146](https://github.com/kubeflow/pipelines/issues/1146)
+- Backend Docker build fails with python error in resnet-train-pipeline.py [\#1142](https://github.com/kubeflow/pipelines/issues/1142)
+- Use range instead of pin for python dependencies for kfp [\#1134](https://github.com/kubeflow/pipelines/issues/1134)
+- Deploy a TFX pipeline from the command line [\#1127](https://github.com/kubeflow/pipelines/issues/1127)
+- CloudBuild is failing since the CMLE deployer component was deleted [\#1123](https://github.com/kubeflow/pipelines/issues/1123)
+- Failed to load the trained model using kubeflow deployer component [\#1102](https://github.com/kubeflow/pipelines/issues/1102)
+- Pipeline parameters should be accessible to pipeline logic [\#1099](https://github.com/kubeflow/pipelines/issues/1099)
+- Retrieve the experiment during list run [\#1084](https://github.com/kubeflow/pipelines/issues/1084)
+- Connect local notebook to remote Pipeline cluster [\#1079](https://github.com/kubeflow/pipelines/issues/1079)
+- How to support continuous online learning? [\#1053](https://github.com/kubeflow/pipelines/issues/1053)
+- When notebook submits a run and clicks the link they get "Error: failed to retrieve run:" [\#1017](https://github.com/kubeflow/pipelines/issues/1017)
+- Add static type checking sample test [\#974](https://github.com/kubeflow/pipelines/issues/974)
+- Test coverage for runs without experiments [\#685](https://github.com/kubeflow/pipelines/issues/685)
+
+**Merged pull requests:**
+
+- release 0.1.17: update yaml tag in samples [\#1176](https://github.com/kubeflow/pipelines/pull/1176) ([gaoning777](https://github.com/gaoning777))
+- Component releasing for 0.1.17 [\#1171](https://github.com/kubeflow/pipelines/pull/1171) ([gaoning777](https://github.com/gaoning777))
+- SDK - Simplified the @component decorator [\#1166](https://github.com/kubeflow/pipelines/pull/1166) ([Ark-kun](https://github.com/Ark-kun))
+- delete the resnet image items [\#1165](https://github.com/kubeflow/pipelines/pull/1165) ([gaoning777](https://github.com/gaoning777))
+- Adding myself as a reviewer for components [\#1161](https://github.com/kubeflow/pipelines/pull/1161) ([animeshsingh](https://github.com/animeshsingh))
+- new kubernetes packages contain breaking change, thus fixing the version in the sample test image [\#1159](https://github.com/kubeflow/pipelines/pull/1159) ([gaoning777](https://github.com/gaoning777))
+- bug fix in resnet sample [\#1154](https://github.com/kubeflow/pipelines/pull/1154) ([gaoning777](https://github.com/gaoning777))
+- Add filter to next page token so it applies to subsequently requested pages [\#1153](https://github.com/kubeflow/pipelines/pull/1153) ([neuromage](https://github.com/neuromage))
+- fix missing filter for list call [\#1151](https://github.com/kubeflow/pipelines/pull/1151) ([IronPan](https://github.com/IronPan))
+- using comp yaml for ffdl pipeline [\#1148](https://github.com/kubeflow/pipelines/pull/1148) ([animeshsingh](https://github.com/animeshsingh))
+- component readmes [\#1147](https://github.com/kubeflow/pipelines/pull/1147) ([animeshsingh](https://github.com/animeshsingh))
+- Removes unnecessary API calls [\#1144](https://github.com/kubeflow/pipelines/pull/1144) ([rileyjbauer](https://github.com/rileyjbauer))
+- use kubeflow/pipelines branch for deployment in test [\#1143](https://github.com/kubeflow/pipelines/pull/1143) ([IronPan](https://github.com/IronPan))
+- Set run model default timestamp to 0 [\#1140](https://github.com/kubeflow/pipelines/pull/1140) ([IronPan](https://github.com/IronPan))
+- Allow adding pipeline with name and description. [\#1139](https://github.com/kubeflow/pipelines/pull/1139) ([neuromage](https://github.com/neuromage))
+- Allow later versions of dependencies to be used with Python KFP package. [\#1137](https://github.com/kubeflow/pipelines/pull/1137) ([neuromage](https://github.com/neuromage))
+- Expose service-type as parameter [\#1136](https://github.com/kubeflow/pipelines/pull/1136) ([vincent-pli](https://github.com/vincent-pli))
+- Add helper to easily add aws secrets [\#1133](https://github.com/kubeflow/pipelines/pull/1133) ([Jeffwan](https://github.com/Jeffwan))
+- add type checking sample to sample tests [\#1129](https://github.com/kubeflow/pipelines/pull/1129) ([gaoning777](https://github.com/gaoning777))
+- Updated descriptions of pre-installed samples [\#1126](https://github.com/kubeflow/pipelines/pull/1126) ([Ark-kun](https://github.com/Ark-kun))
+- Fixed CloudBuild failure [\#1124](https://github.com/kubeflow/pipelines/pull/1124) ([Ark-kun](https://github.com/Ark-kun))
+- Surface workflow finished time in list run API [\#1122](https://github.com/kubeflow/pipelines/pull/1122) ([IronPan](https://github.com/IronPan))
+- SDK - Simplified the @pipeline decorator [\#1120](https://github.com/kubeflow/pipelines/pull/1120) ([Ark-kun](https://github.com/Ark-kun))
+- update katib laucher [\#1118](https://github.com/kubeflow/pipelines/pull/1118) ([hougangliu](https://github.com/hougangliu))
+- Updated the package version in the notebooks [\#1117](https://github.com/kubeflow/pipelines/pull/1117) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "Basic - Retry" sample [\#1111](https://github.com/kubeflow/pipelines/pull/1111) ([Ark-kun](https://github.com/Ark-kun))
+- Updated the "Basic - Exit handler" sample [\#1109](https://github.com/kubeflow/pipelines/pull/1109) ([Ark-kun](https://github.com/Ark-kun))
+- incremented app version [\#1107](https://github.com/kubeflow/pipelines/pull/1107) ([kevinbache](https://github.com/kevinbache))
+- uprade tf-serving in deployer component [\#1103](https://github.com/kubeflow/pipelines/pull/1103) ([jinchihe](https://github.com/jinchihe))
+- Stabilized the artifact ordering during the compilation [\#1097](https://github.com/kubeflow/pipelines/pull/1097) ([Ark-kun](https://github.com/Ark-kun))
+- Creates a default experiment at API server set up time [\#1089](https://github.com/kubeflow/pipelines/pull/1089) ([rileyjbauer](https://github.com/rileyjbauer))
+- Passing the annotations and labels to the ContainerOp [\#1077](https://github.com/kubeflow/pipelines/pull/1077) ([Ark-kun](https://github.com/Ark-kun))
+- Build Pipeline leveraging Arena [\#1058](https://github.com/kubeflow/pipelines/pull/1058) ([cheyang](https://github.com/cheyang))
+- Rewrite ResNet sample by GCP components [\#1018](https://github.com/kubeflow/pipelines/pull/1018) ([hongye-sun](https://github.com/hongye-sun))
+- Add a ResNet example from NVIDIA [\#964](https://github.com/kubeflow/pipelines/pull/964) ([khoa-ho](https://github.com/khoa-ho))
+
+## [0.1.16](https://github.com/kubeflow/pipelines/tree/0.1.16) (2019-04-06)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.14...0.1.16)
+
+**Closed issues:**
+
+- usage reporting link broken [\#1073](https://github.com/kubeflow/pipelines/issues/1073)
+- Exit handler task does not have proper name in the UX [\#1051](https://github.com/kubeflow/pipelines/issues/1051)
+- Remove link to JupyterHub from pipelines UI [\#1029](https://github.com/kubeflow/pipelines/issues/1029)
+- Invalid memory address or nil pointer dereference in metadata\_store.go [\#1024](https://github.com/kubeflow/pipelines/issues/1024)
+- KFP SDK: suggest retry if pipeline launch returns 500 error \('warmup' apparently required\) [\#1007](https://github.com/kubeflow/pipelines/issues/1007)
+- Refactor the test infra code [\#875](https://github.com/kubeflow/pipelines/issues/875)
+- Ability to stop runs [\#413](https://github.com/kubeflow/pipelines/issues/413)
+- Surface the sample test results to github [\#361](https://github.com/kubeflow/pipelines/issues/361)
+- feature ask: support for loops [\#303](https://github.com/kubeflow/pipelines/issues/303)
+
+**Merged pull requests:**
+
+- Fixed Kubeflow sample test [\#1096](https://github.com/kubeflow/pipelines/pull/1096) ([Ark-kun](https://github.com/Ark-kun))
+- Rolling back TFMA image [\#1095](https://github.com/kubeflow/pipelines/pull/1095) ([Ark-kun](https://github.com/Ark-kun))
+- Fixed bug in TFMA component code [\#1094](https://github.com/kubeflow/pipelines/pull/1094) ([Ark-kun](https://github.com/Ark-kun))
+- Fixed bug in ROC component definition [\#1093](https://github.com/kubeflow/pipelines/pull/1093) ([Ark-kun](https://github.com/Ark-kun))
+- Added "Target lambda" parameter to "Confusion matrix" component [\#1091](https://github.com/kubeflow/pipelines/pull/1091) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Fix obj name in K8s -\> json conversion util [\#1088](https://github.com/kubeflow/pipelines/pull/1088) ([NathanDeMaria](https://github.com/NathanDeMaria))
+- optimize UX for loading pipeline pages [\#1085](https://github.com/kubeflow/pipelines/pull/1085) ([IronPan](https://github.com/IronPan))
+- Update CHANGELOG for release 0.1.15 [\#1083](https://github.com/kubeflow/pipelines/pull/1083) ([neuromage](https://github.com/neuromage))
+- Release component image version a277f87ea1d4707bf860d080d06639b7caf9a1cf [\#1082](https://github.com/kubeflow/pipelines/pull/1082) ([neuromage](https://github.com/neuromage))
+- Fix wrong indentation in extract\_pipelineparams\_from\_any\(\) [\#1076](https://github.com/kubeflow/pipelines/pull/1076) ([elikatsis](https://github.com/elikatsis))
+- Removes link to Kubeflow privacy links as KFP no longer runs Spartakus [\#1074](https://github.com/kubeflow/pipelines/pull/1074) ([rileyjbauer](https://github.com/rileyjbauer))
+- metadata tag for watson components [\#1072](https://github.com/kubeflow/pipelines/pull/1072) ([animeshsingh](https://github.com/animeshsingh))
+- metadata for ibm components targeting opensource [\#1071](https://github.com/kubeflow/pipelines/pull/1071) ([animeshsingh](https://github.com/animeshsingh))
+- Add Bazel build/test to Travis run. [\#1069](https://github.com/kubeflow/pipelines/pull/1069) ([neuromage](https://github.com/neuromage))
+- Fixed a bug in tarball handling [\#1068](https://github.com/kubeflow/pipelines/pull/1068) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Configure artifact name and path separately \(again\) [\#1067](https://github.com/kubeflow/pipelines/pull/1067) ([Ark-kun](https://github.com/Ark-kun))
+- Release 03/28/2019 [\#1063](https://github.com/kubeflow/pipelines/pull/1063) ([vicaire](https://github.com/vicaire))
+- Allow bigquery component to export to table only [\#1062](https://github.com/kubeflow/pipelines/pull/1062) ([hongye-sun](https://github.com/hongye-sun))
+- Recursion bug fix [\#1061](https://github.com/kubeflow/pipelines/pull/1061) ([gaoning777](https://github.com/gaoning777))
+- Improve runtime graph exit-handler node name [\#1059](https://github.com/kubeflow/pipelines/pull/1059) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK/Components - Handling public GCS URIs in load\_component\_from\_url [\#1057](https://github.com/kubeflow/pipelines/pull/1057) ([Ark-kun](https://github.com/Ark-kun))
+- Improve doc for gcp components. [\#1049](https://github.com/kubeflow/pipelines/pull/1049) ([hongye-sun](https://github.com/hongye-sun))
+- remove jupyter hub link from UI [\#1046](https://github.com/kubeflow/pipelines/pull/1046) ([IronPan](https://github.com/IronPan))
+- Test loading all component.yaml definitions [\#1045](https://github.com/kubeflow/pipelines/pull/1045) ([Ark-kun](https://github.com/Ark-kun))
+- Allow passing comma-delimited column names [\#1044](https://github.com/kubeflow/pipelines/pull/1044) ([Ark-kun](https://github.com/Ark-kun))
+- Add run termination controls to ui [\#1039](https://github.com/kubeflow/pipelines/pull/1039) ([rileyjbauer](https://github.com/rileyjbauer))
+- Fixed some typos [\#1038](https://github.com/kubeflow/pipelines/pull/1038) ([thedriftofwords](https://github.com/thedriftofwords))
+- Improved the "IBM - Create kubernetes secret" component [\#1027](https://github.com/kubeflow/pipelines/pull/1027) ([Ark-kun](https://github.com/Ark-kun))
+- add proxy agent image to cloud builder [\#996](https://github.com/kubeflow/pipelines/pull/996) ([IronPan](https://github.com/IronPan))
+- Pin specific version of kubeflow instead of using master [\#995](https://github.com/kubeflow/pipelines/pull/995) ([IronPan](https://github.com/IronPan))
+- Added component definition files for our components [\#539](https://github.com/kubeflow/pipelines/pull/539) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.14](https://github.com/kubeflow/pipelines/tree/0.1.14) (2019-03-28)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.13...0.1.14)
+
+**Closed issues:**
+
+- Users should be able to add metadata to component.yaml [\#1013](https://github.com/kubeflow/pipelines/issues/1013)
+- Allow Python functions defined in a pipeline to be run as entrypoint for operations. [\#979](https://github.com/kubeflow/pipelines/issues/979)
+- Unable to use TFJob in Kubeflow Pipeline, current dsl compiler have not support for TFJOBOP [\#973](https://github.com/kubeflow/pipelines/issues/973)
+- Make the data generated by a pipeline step available before the pipeline step completes. [\#959](https://github.com/kubeflow/pipelines/issues/959)
+- Kubeflow version vs Jupyter Hub Image Versions Confusion [\#948](https://github.com/kubeflow/pipelines/issues/948)
+- Moving and retrieving logs from persistent volume [\#940](https://github.com/kubeflow/pipelines/issues/940)
+- Use DL VM as default container for Notebooks. [\#911](https://github.com/kubeflow/pipelines/issues/911)
+- Unable to delete service account in e2e tests [\#893](https://github.com/kubeflow/pipelines/issues/893)
+- How to accessing the source code from Images [\#881](https://github.com/kubeflow/pipelines/issues/881)
+- ModuleNotFoundError: No module named 'kfp' [\#872](https://github.com/kubeflow/pipelines/issues/872)
+- Error regarding syncing workflow [\#869](https://github.com/kubeflow/pipelines/issues/869)
+- Errors in the test logs [\#865](https://github.com/kubeflow/pipelines/issues/865)
+- Switch default package format to .zip [\#854](https://github.com/kubeflow/pipelines/issues/854)
+- How to deploy distribute training with tensorflow or pytorch with pipelines [\#848](https://github.com/kubeflow/pipelines/issues/848)
+- Add option in DSL compiler to output YAML [\#834](https://github.com/kubeflow/pipelines/issues/834)
+- Flaky presubmit step in build-scheduledworkflow-image [\#831](https://github.com/kubeflow/pipelines/issues/831)
+- Pipeline view UX is completely blank [\#824](https://github.com/kubeflow/pipelines/issues/824)
+- kfctl.sh apply platform [\#820](https://github.com/kubeflow/pipelines/issues/820)
+- 500 Error Jupyter notebook [\#818](https://github.com/kubeflow/pipelines/issues/818)
+- Move e2e tests out of us-central1-a [\#807](https://github.com/kubeflow/pipelines/issues/807)
+- Are there plans to allow kubeflow pipelines to be created through a manifest file? [\#804](https://github.com/kubeflow/pipelines/issues/804)
+- Support inline html in html viewer [\#795](https://github.com/kubeflow/pipelines/issues/795)
+- Namespace "kubeflow" is "Terminating" forever [\#768](https://github.com/kubeflow/pipelines/issues/768)
+- Kubeflow 0.4 still include argo UI [\#753](https://github.com/kubeflow/pipelines/issues/753)
+- pipeline step failed with exit status code 2: failed to save outputs [\#750](https://github.com/kubeflow/pipelines/issues/750)
+- Allow eventing in Kubeflow Pipelines [\#725](https://github.com/kubeflow/pipelines/issues/725)
+- Can not use NFS volume for Tensorboard logs [\#708](https://github.com/kubeflow/pipelines/issues/708)
+- Unschedulable: 0/2 nodes are available: 2 Insufficient cpu. [\#706](https://github.com/kubeflow/pipelines/issues/706)
+- Pipeline upload via URL doesn't work when port-forwarding from cloud shell [\#700](https://github.com/kubeflow/pipelines/issues/700)
+- Error running TF training job on KF 0.3.5 using GPUs [\#686](https://github.com/kubeflow/pipelines/issues/686)
+- "Google Compute Engine does not have enough resources available to fulfill request: us-central1-a" [\#673](https://github.com/kubeflow/pipelines/issues/673)
+- Add bigquery component in release [\#643](https://github.com/kubeflow/pipelines/issues/643)
+- Pipeline notebook samples : "Install Pipeline SDK " error [\#610](https://github.com/kubeflow/pipelines/issues/610)
+- Deployer component to output model URL [\#593](https://github.com/kubeflow/pipelines/issues/593)
+- TFMA output is empty [\#592](https://github.com/kubeflow/pipelines/issues/592)
+- Pending run step shown as error [\#591](https://github.com/kubeflow/pipelines/issues/591)
+- UI dashboard needs to show the pipelines system version [\#590](https://github.com/kubeflow/pipelines/issues/590)
+- CMLE/TFX failure: Exceeded the max allowed number of models per project: 100. [\#587](https://github.com/kubeflow/pipelines/issues/587)
+- File output is too long [\#577](https://github.com/kubeflow/pipelines/issues/577)
+- Deployment problem with private cluster [\#534](https://github.com/kubeflow/pipelines/issues/534)
+- How to deploy distribute training with tensorflow [\#533](https://github.com/kubeflow/pipelines/issues/533)
+- ERROR: \(gcloud.container.clusters.delete\) One of \[--zone, --region\] must be supplied: Please specify location.. [\#488](https://github.com/kubeflow/pipelines/issues/488)
+- Support TPU in DSL [\#485](https://github.com/kubeflow/pipelines/issues/485)
+- Show an error message for output viewers that can't fetch their data [\#479](https://github.com/kubeflow/pipelines/issues/479)
+- Recurring runs create runs with an embedded pipeline spec, rather than id [\#475](https://github.com/kubeflow/pipelines/issues/475)
+- Unable to visualize any HTML content with Output Viewer [\#473](https://github.com/kubeflow/pipelines/issues/473)
+- Improve logic to preload sample pipeline [\#455](https://github.com/kubeflow/pipelines/issues/455)
+- Add component metadata to the generated Argo YAML for consumption by the UI [\#452](https://github.com/kubeflow/pipelines/issues/452)
+- Enable users to launch and manage multiple Tensorboard instances within kubeflow pipelines [\#443](https://github.com/kubeflow/pipelines/issues/443)
+- Python SDK is not continuous integration friendly. [\#420](https://github.com/kubeflow/pipelines/issues/420)
+- K8s 1.11 fails on generateName when upgrading pipelines [\#409](https://github.com/kubeflow/pipelines/issues/409)
+- TFJob doesn't stop trainer jobs after a timeout [\#408](https://github.com/kubeflow/pipelines/issues/408)
+- TFJob doesn't forward error logs from the jobs [\#407](https://github.com/kubeflow/pipelines/issues/407)
+- Pipeline GCP sample components need to switch to default gcp ops [\#406](https://github.com/kubeflow/pipelines/issues/406)
+- Permanent storage for pipeline system [\#395](https://github.com/kubeflow/pipelines/issues/395)
+- Error installing package with ks [\#372](https://github.com/kubeflow/pipelines/issues/372)
+- Pre-install samples via bootstrapper. [\#356](https://github.com/kubeflow/pipelines/issues/356)
+- Backend should not rely on presence of samples. [\#355](https://github.com/kubeflow/pipelines/issues/355)
+- deploy problem: 3 pvc like mysql not created at the script and not specified label selector [\#352](https://github.com/kubeflow/pipelines/issues/352)
+- RESOURCE\_EXHAUSTED: Maximum number of keys on account reached [\#348](https://github.com/kubeflow/pipelines/issues/348)
+- TFX Components pipeline- Expanding model analysis artifact does not go full screen [\#341](https://github.com/kubeflow/pipelines/issues/341)
+- Support for Argo artifacts [\#336](https://github.com/kubeflow/pipelines/issues/336)
+- Cloud platform scope not carried through on autoscaling [\#332](https://github.com/kubeflow/pipelines/issues/332)
+- Unclear how much cpu/memory actually available [\#330](https://github.com/kubeflow/pipelines/issues/330)
+- uninstall failed, there are twenty pod has not been deleted [\#329](https://github.com/kubeflow/pipelines/issues/329)
+- Conditions are not visualized properly [\#321](https://github.com/kubeflow/pipelines/issues/321)
+- Container op should expose method to append volume/mount/envs [\#311](https://github.com/kubeflow/pipelines/issues/311)
+- UI says "Successfully created new Run", but it isn't in the list [\#308](https://github.com/kubeflow/pipelines/issues/308)
+- Error running pipeline: cannot create tfjobs.kubeflow.org 403 [\#294](https://github.com/kubeflow/pipelines/issues/294)
+- Python DSL should support multiple storage options \(GCS, PVC, etc.\) [\#275](https://github.com/kubeflow/pipelines/issues/275)
+- Modify container images so that they support multiple data store options depending on parameters [\#274](https://github.com/kubeflow/pipelines/issues/274)
+- Add sample tests for notebooks [\#263](https://github.com/kubeflow/pipelines/issues/263)
+- UI feature ask: Search experiments and runs [\#257](https://github.com/kubeflow/pipelines/issues/257)
+- Containers should not be allowed to directly create K8s objects [\#254](https://github.com/kubeflow/pipelines/issues/254)
+- GKE cluster creation warnings [\#253](https://github.com/kubeflow/pipelines/issues/253)
+- Consider never deleting the runs [\#247](https://github.com/kubeflow/pipelines/issues/247)
+- Our ksonnet sometimes throws "SIGSEGV: segmentation violation" failing deployment [\#241](https://github.com/kubeflow/pipelines/issues/241)
+- Get experiment by name, create run in experiment by name [\#237](https://github.com/kubeflow/pipelines/issues/237)
+- UI should display documentations for the steps of a pipeline [\#227](https://github.com/kubeflow/pipelines/issues/227)
+- Missing dropoff latitudes [\#225](https://github.com/kubeflow/pipelines/issues/225)
+- Tensorboard not showing historical AUC / Accuracy [\#223](https://github.com/kubeflow/pipelines/issues/223)
+- Our tests fail on external PRs [\#215](https://github.com/kubeflow/pipelines/issues/215)
+- We need automatic labels indicating whether an issue/PR is internal/external. [\#207](https://github.com/kubeflow/pipelines/issues/207)
+- Support submitting runs from local machine [\#206](https://github.com/kubeflow/pipelines/issues/206)
+- Make it easy for people to write pipeline tests in python [\#203](https://github.com/kubeflow/pipelines/issues/203)
+- Tests are launched in doubles since Nov 09 18:30 [\#197](https://github.com/kubeflow/pipelines/issues/197)
+- Tests sometimes run multiple times for the same commit \(creating multiple GKE clusters\). [\#192](https://github.com/kubeflow/pipelines/issues/192)
+- We have ~40 GKE clusters \(~=100 instances\) running tests and our quotas are exhausted [\#191](https://github.com/kubeflow/pipelines/issues/191)
+- Instruction for running it on minikube [\#184](https://github.com/kubeflow/pipelines/issues/184)
+- feature request: restore the client method for creating a pipeline [\#175](https://github.com/kubeflow/pipelines/issues/175)
+- Create GCS path checker component [\#170](https://github.com/kubeflow/pipelines/issues/170)
+- feature request: in SDK support 'get\_or\_create\_experiment\(\)' [\#161](https://github.com/kubeflow/pipelines/issues/161)
+- Pipeline API Server Swagger Client \(Go\) for Pipeline Create does not take a "name" parameter [\#123](https://github.com/kubeflow/pipelines/issues/123)
+- Pipeline API Server Swagger Client \(Go\) for Pipeline Upload returns incomplete output [\#122](https://github.com/kubeflow/pipelines/issues/122)
+- Fasten release process for the image tag update. [\#115](https://github.com/kubeflow/pipelines/issues/115)
+- Persist pod logs after they finish [\#93](https://github.com/kubeflow/pipelines/issues/93)
+- Need to add notebooks with end-to-end sample scenarios [\#80](https://github.com/kubeflow/pipelines/issues/80)
+- After the user creates a run, redirect to the run page, not the runs list page. [\#64](https://github.com/kubeflow/pipelines/issues/64)
+- Remember the page I was on [\#62](https://github.com/kubeflow/pipelines/issues/62)
+- Doesn't remove old containers \(\> maxHistory\) [\#15](https://github.com/kubeflow/pipelines/issues/15)
+- ScheduledWorkflow CRD: Investigate need for retries beyond the ones provided by Argo [\#5](https://github.com/kubeflow/pipelines/issues/5)
+
+**Merged pull requests:**
+
+- add licenses for katib-launcher [\#1056](https://github.com/kubeflow/pipelines/pull/1056) ([hougangliu](https://github.com/hougangliu))
+- add license file to proxy agent docker image [\#1054](https://github.com/kubeflow/pipelines/pull/1054) ([IronPan](https://github.com/IronPan))
+- Fix compiler unit test bug [\#1048](https://github.com/kubeflow/pipelines/pull/1048) ([gaoning777](https://github.com/gaoning777))
+- Fixed handling parameters with default values in task factory construction [\#1047](https://github.com/kubeflow/pipelines/pull/1047) ([Ark-kun](https://github.com/Ark-kun))
+- Register proxy agent using the service account endpoint [\#1043](https://github.com/kubeflow/pipelines/pull/1043) ([IronPan](https://github.com/IronPan))
+- Revert "Register proxy agent using the service account endpoint" [\#1042](https://github.com/kubeflow/pipelines/pull/1042) ([IronPan](https://github.com/IronPan))
+- changelog for v0.1.13 [\#1041](https://github.com/kubeflow/pipelines/pull/1041) ([IronPan](https://github.com/IronPan))
+- Pin the component.yaml version in all the samples. [\#1037](https://github.com/kubeflow/pipelines/pull/1037) ([hongye-sun](https://github.com/hongye-sun))
+- Release components to include fix \#1028. [\#1032](https://github.com/kubeflow/pipelines/pull/1032) ([hongye-sun](https://github.com/hongye-sun))
+- Add more tests for metadata store. [\#1030](https://github.com/kubeflow/pipelines/pull/1030) ([neuromage](https://github.com/neuromage))
+- Fix a missing precheck in create cluster component [\#1028](https://github.com/kubeflow/pipelines/pull/1028) ([hongye-sun](https://github.com/hongye-sun))
+- Added the metadata property to ComponentSpec [\#1023](https://github.com/kubeflow/pipelines/pull/1023) ([Ark-kun](https://github.com/Ark-kun))
+- exposing type checking [\#1022](https://github.com/kubeflow/pipelines/pull/1022) ([gaoning777](https://github.com/gaoning777))
+- openvino demo pipeline documentation updates [\#1020](https://github.com/kubeflow/pipelines/pull/1020) ([dtrawins](https://github.com/dtrawins))
+- Add a recursion sample [\#1016](https://github.com/kubeflow/pipelines/pull/1016) ([gaoning777](https://github.com/gaoning777))
+- Support recursions in a function [\#1014](https://github.com/kubeflow/pipelines/pull/1014) ([gaoning777](https://github.com/gaoning777))
+- OpenVINO Model Server deployer [\#1008](https://github.com/kubeflow/pipelines/pull/1008) ([dtrawins](https://github.com/dtrawins))
+- upgrade ks in deployer to fix one block issue [\#1005](https://github.com/kubeflow/pipelines/pull/1005) ([jinchihe](https://github.com/jinchihe))
+- SDK/Tests - Simplified type compatibility tests [\#1004](https://github.com/kubeflow/pipelines/pull/1004) ([Ark-kun](https://github.com/Ark-kun))
+- Update developer\_guide.md [\#989](https://github.com/kubeflow/pipelines/pull/989) ([soolaugust](https://github.com/soolaugust))
+- Add proxy agent runner for kubeflow pipeline [\#988](https://github.com/kubeflow/pipelines/pull/988) ([IronPan](https://github.com/IronPan))
+- Add type check samples [\#955](https://github.com/kubeflow/pipelines/pull/955) ([gaoning777](https://github.com/gaoning777))
+- Enable pipeline packages with multiple files [\#939](https://github.com/kubeflow/pipelines/pull/939) ([Ark-kun](https://github.com/Ark-kun))
+- Keras - Train classifier: Improved the README.md [\#934](https://github.com/kubeflow/pipelines/pull/934) ([Ark-kun](https://github.com/Ark-kun))
+- Feature: sidecar for ContainerOp [\#879](https://github.com/kubeflow/pipelines/pull/879) ([eterna2](https://github.com/eterna2))
+- dsl generate zip file [\#855](https://github.com/kubeflow/pipelines/pull/855) ([gaoning777](https://github.com/gaoning777))
+
+## [0.1.13](https://github.com/kubeflow/pipelines/tree/0.1.13) (2019-03-22)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.12...0.1.13)
+
+**Closed issues:**
+
+- bazel test command does not actually run tests [\#977](https://github.com/kubeflow/pipelines/issues/977)
+- bazel build fails with fatal error: ares.h: No such file or directory [\#970](https://github.com/kubeflow/pipelines/issues/970)
+- dsl.ContainerOp.after\(\) only works with Ops that have already sanitized names [\#965](https://github.com/kubeflow/pipelines/issues/965)
+- value in file\_outputs is not being passed to input parameters correctly [\#957](https://github.com/kubeflow/pipelines/issues/957)
+- Support Azure authentication for pipeline ops [\#953](https://github.com/kubeflow/pipelines/issues/953)
+- \[CMLE\] trainer component should output the trained model URI [\#944](https://github.com/kubeflow/pipelines/issues/944)
+- dependency analysis issue with 'component.yaml'-based ops [\#941](https://github.com/kubeflow/pipelines/issues/941)
+- rename sample GCS path to fix model-name typo [\#936](https://github.com/kubeflow/pipelines/issues/936)
+- GitHub checks have been enabled in this repo, but are not supported by Tide. [\#930](https://github.com/kubeflow/pipelines/issues/930)
+- Cannot find module for path ml\_metadata/metadata\_store/mlmetadata [\#928](https://github.com/kubeflow/pipelines/issues/928)
+- Pipeline name is not checked [\#825](https://github.com/kubeflow/pipelines/issues/825)
+- Can not mount volume in Container op [\#477](https://github.com/kubeflow/pipelines/issues/477)
+- Travis tests do not properly test what happens when the branch is merged. [\#431](https://github.com/kubeflow/pipelines/issues/431)
+- Full solution to tests and code desync problem [\#200](https://github.com/kubeflow/pipelines/issues/200)
+- Prow code, test image code and code being tested can still desync. [\#198](https://github.com/kubeflow/pipelines/issues/198)
+- SDK/Components/Python - Functions that do not return anything [\#186](https://github.com/kubeflow/pipelines/issues/186)
+
+**Merged pull requests:**
+
+- Release component image version f98ec68488cfbac022893a87c5bca083f03f2065 [\#1021](https://github.com/kubeflow/pipelines/pull/1021) ([IronPan](https://github.com/IronPan))
+- Fix hard-coded model export dir name [\#1015](https://github.com/kubeflow/pipelines/pull/1015) ([hongye-sun](https://github.com/hongye-sun))
+- Fixed bug in docstring construction [\#1012](https://github.com/kubeflow/pipelines/pull/1012) ([Ark-kun](https://github.com/Ark-kun))
+- Update TFX Pipeline Example Readme [\#1011](https://github.com/kubeflow/pipelines/pull/1011) ([rculbertson](https://github.com/rculbertson))
+- Update sidebar styling [\#1010](https://github.com/kubeflow/pipelines/pull/1010) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK/DSL/Compiler - Fixed handling of empty pipeline name [\#1009](https://github.com/kubeflow/pipelines/pull/1009) ([Ark-kun](https://github.com/Ark-kun))
+- Fix dataflow and mlengine samples with recent changes [\#1006](https://github.com/kubeflow/pipelines/pull/1006) ([hongye-sun](https://github.com/hongye-sun))
+- Updating the description for model\_uid param [\#1002](https://github.com/kubeflow/pipelines/pull/1002) ([animeshsingh](https://github.com/animeshsingh))
+- Adjustments for OpenVINO v5 [\#997](https://github.com/kubeflow/pipelines/pull/997) ([dtrawins](https://github.com/dtrawins))
+- use pending commit id for cluster and source code name [\#994](https://github.com/kubeflow/pipelines/pull/994) ([IronPan](https://github.com/IronPan))
+- Ignore not found error in delete cluster [\#991](https://github.com/kubeflow/pipelines/pull/991) ([hongye-sun](https://github.com/hongye-sun))
+- Update component.yaml for dataflow and cmle with recent changes. [\#987](https://github.com/kubeflow/pipelines/pull/987) ([hongye-sun](https://github.com/hongye-sun))
+- Add job\_dir.txt as output files of CMLE trainning job. [\#986](https://github.com/kubeflow/pipelines/pull/986) ([hongye-sun](https://github.com/hongye-sun))
+- Improve dataflow job to use GCS to keep temp job states. [\#985](https://github.com/kubeflow/pipelines/pull/985) ([hongye-sun](https://github.com/hongye-sun))
+- Add component.yaml to every ibm-components [\#984](https://github.com/kubeflow/pipelines/pull/984) ([Tomcli](https://github.com/Tomcli))
+- Add instruction on building viewer CRD controller image [\#981](https://github.com/kubeflow/pipelines/pull/981) ([terrytangyuan](https://github.com/terrytangyuan))
+- well-defined parameter types [\#978](https://github.com/kubeflow/pipelines/pull/978) ([gaoning777](https://github.com/gaoning777))
+- Added the component name to the docstring [\#976](https://github.com/kubeflow/pipelines/pull/976) ([Ark-kun](https://github.com/Ark-kun))
+- Add unit tests pipelineparam [\#975](https://github.com/kubeflow/pipelines/pull/975) ([gaoning777](https://github.com/gaoning777))
+- Fix sample test failure because of the type information in the pipelineparam [\#972](https://github.com/kubeflow/pipelines/pull/972) ([gaoning777](https://github.com/gaoning777))
+- Add documentation and sample notebook for dataproc components. [\#971](https://github.com/kubeflow/pipelines/pull/971) ([hongye-sun](https://github.com/hongye-sun))
+- Add optional pipeline\_id to run\_pipeline api [\#968](https://github.com/kubeflow/pipelines/pull/968) ([hongye-sun](https://github.com/hongye-sun))
+- Fix cancellation bug for dataproc job. [\#967](https://github.com/kubeflow/pipelines/pull/967) ([hongye-sun](https://github.com/hongye-sun))
+- Add missing main\_class arg to spark job and set default values. [\#966](https://github.com/kubeflow/pipelines/pull/966) ([hongye-sun](https://github.com/hongye-sun))
+- Set default values for payload objects in case of empty. [\#962](https://github.com/kubeflow/pipelines/pull/962) ([hongye-sun](https://github.com/hongye-sun))
+- Add fake metadata store and fix tests. [\#958](https://github.com/kubeflow/pipelines/pull/958) ([neuromage](https://github.com/neuromage))
+- Add dataproc component yaml files [\#956](https://github.com/kubeflow/pipelines/pull/956) ([hongye-sun](https://github.com/hongye-sun))
+- helper fn to add az secrets to container [\#954](https://github.com/kubeflow/pipelines/pull/954) ([rakelkar](https://github.com/rakelkar))
+- Fixing the broken links for components source code in XGBoost Spark P… [\#952](https://github.com/kubeflow/pipelines/pull/952) ([animeshsingh](https://github.com/animeshsingh))
+- Watson Openscale Components to manage models [\#950](https://github.com/kubeflow/pipelines/pull/950) ([animeshsingh](https://github.com/animeshsingh))
+- SDK/DSL/Compiler - Fixed compilation when using ContainerOp.after [\#943](https://github.com/kubeflow/pipelines/pull/943) ([Ark-kun](https://github.com/Ark-kun))
+- Add type check [\#938](https://github.com/kubeflow/pipelines/pull/938) ([gaoning777](https://github.com/gaoning777))
+- Components - GCP: Fixed typo: Replaced cencus with census [\#937](https://github.com/kubeflow/pipelines/pull/937) ([Ark-kun](https://github.com/Ark-kun))
+- Add code formatting to markdown viewer [\#935](https://github.com/kubeflow/pipelines/pull/935) ([yebrahim](https://github.com/yebrahim))
+- Components - Keras - Train classifier: Changed the types to conform to the chosen type system [\#933](https://github.com/kubeflow/pipelines/pull/933) ([Ark-kun](https://github.com/Ark-kun))
+- Add notebook instructions on how to query ML Metadata for output artifacts [\#932](https://github.com/kubeflow/pipelines/pull/932) ([neuromage](https://github.com/neuromage))
+- SDK - Added support for loading zip-packed components [\#931](https://github.com/kubeflow/pipelines/pull/931) ([Ark-kun](https://github.com/Ark-kun))
+- update changelog for 0.1.12 [\#925](https://github.com/kubeflow/pipelines/pull/925) ([gaoning777](https://github.com/gaoning777))
+- SDK - Fixed small bug in DSL code that generates unique names for ops [\#923](https://github.com/kubeflow/pipelines/pull/923) ([Ark-kun](https://github.com/Ark-kun))
+- Samples - Improved the TFX OSS notebook and README [\#922](https://github.com/kubeflow/pipelines/pull/922) ([Ark-kun](https://github.com/Ark-kun))
+- Detecting file format using signature instead of file extension [\#919](https://github.com/kubeflow/pipelines/pull/919) ([Ark-kun](https://github.com/Ark-kun))
+- Add Tommy as the approver for the IBM components and samples. [\#915](https://github.com/kubeflow/pipelines/pull/915) ([Tomcli](https://github.com/Tomcli))
+- Pass meta to containerop and pipeline [\#905](https://github.com/kubeflow/pipelines/pull/905) ([gaoning777](https://github.com/gaoning777))
+- DataProc commands implementation [\#862](https://github.com/kubeflow/pipelines/pull/862) ([hongye-sun](https://github.com/hongye-sun))
+- SDK/Components - Added naming.generate\_unique\_name\_conversion\_table [\#716](https://github.com/kubeflow/pipelines/pull/716) ([Ark-kun](https://github.com/Ark-kun))
+- Added the ability to terminate a run [\#528](https://github.com/kubeflow/pipelines/pull/528) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.12](https://github.com/kubeflow/pipelines/tree/0.1.12) (2019-03-06)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.11...0.1.12)
+
+**Merged pull requests:**
+
+- Fix bugs in bigquery component [\#921](https://github.com/kubeflow/pipelines/pull/921) ([hongye-sun](https://github.com/hongye-sun))
+- fix license links [\#917](https://github.com/kubeflow/pipelines/pull/917) ([hongye-sun](https://github.com/hongye-sun))
+- Update README.md [\#916](https://github.com/kubeflow/pipelines/pull/916) ([paveldournov](https://github.com/paveldournov))
+- Add online prediction instructions and print friendly msg in console. [\#914](https://github.com/kubeflow/pipelines/pull/914) ([hongye-sun](https://github.com/hongye-sun))
+- Create TFX Example.ipynb [\#913](https://github.com/kubeflow/pipelines/pull/913) ([paveldournov](https://github.com/paveldournov))
+- Create README.md [\#909](https://github.com/kubeflow/pipelines/pull/909) ([paveldournov](https://github.com/paveldournov))
+- Deflake frontend e2e test [\#904](https://github.com/kubeflow/pipelines/pull/904) ([yebrahim](https://github.com/yebrahim))
+- Support customized export TF model folder names. [\#903](https://github.com/kubeflow/pipelines/pull/903) ([hongye-sun](https://github.com/hongye-sun))
+- SDK - Configure artifact name and path separately [\#900](https://github.com/kubeflow/pipelines/pull/900) ([Ark-kun](https://github.com/Ark-kun))
+- Add sample notebook and readme markdown for GCP components. [\#899](https://github.com/kubeflow/pipelines/pull/899) ([hongye-sun](https://github.com/hongye-sun))
+- Add markdown viewer [\#897](https://github.com/kubeflow/pipelines/pull/897) ([yebrahim](https://github.com/yebrahim))
+- Infra - Added license for google-cloud-bigtable [\#896](https://github.com/kubeflow/pipelines/pull/896) ([Ark-kun](https://github.com/Ark-kun))
+- Change viewer to markdown instead of html according to FE changes. [\#895](https://github.com/kubeflow/pipelines/pull/895) ([hongye-sun](https://github.com/hongye-sun))
+- Add python conf to the metadata [\#894](https://github.com/kubeflow/pipelines/pull/894) ([gaoning777](https://github.com/gaoning777))
+- Create experiment only if it's not exist. [\#892](https://github.com/kubeflow/pipelines/pull/892) ([hongye-sun](https://github.com/hongye-sun))
+- Add component metadata [\#891](https://github.com/kubeflow/pipelines/pull/891) ([gaoning777](https://github.com/gaoning777))
+- SDK - Added support for default values to Lightweight python components [\#890](https://github.com/kubeflow/pipelines/pull/890) ([Ark-kun](https://github.com/Ark-kun))
+- SDK - Added support for default values to load\_component [\#889](https://github.com/kubeflow/pipelines/pull/889) ([Ark-kun](https://github.com/Ark-kun))
+- move integration test to sub dir [\#888](https://github.com/kubeflow/pipelines/pull/888) ([IronPan](https://github.com/IronPan))
+- GCPcomponents yaml spec [\#887](https://github.com/kubeflow/pipelines/pull/887) ([hongye-sun](https://github.com/hongye-sun))
+- add core types and type checking function [\#886](https://github.com/kubeflow/pipelines/pull/886) ([gaoning777](https://github.com/gaoning777))
+- Add bigquery docstring and dump output path. [\#885](https://github.com/kubeflow/pipelines/pull/885) ([hongye-sun](https://github.com/hongye-sun))
+- Record TFX output artifacts in Metadata store [\#884](https://github.com/kubeflow/pipelines/pull/884) ([neuromage](https://github.com/neuromage))
+- add missing dependency in bazel BUILD [\#883](https://github.com/kubeflow/pipelines/pull/883) ([IronPan](https://github.com/IronPan))
+- Dump job id and change output to /tmp/kfp/output [\#878](https://github.com/kubeflow/pipelines/pull/878) ([hongye-sun](https://github.com/hongye-sun))
+- SDK - Passing the environment variables to container op [\#877](https://github.com/kubeflow/pipelines/pull/877) ([Ark-kun](https://github.com/Ark-kun))
+- Switch to python 2.7 and install beam for the gcp image. [\#876](https://github.com/kubeflow/pipelines/pull/876) ([hongye-sun](https://github.com/hongye-sun))
+- support .zip pipeline package [\#874](https://github.com/kubeflow/pipelines/pull/874) ([IronPan](https://github.com/IronPan))
+- sample test image build failure [\#871](https://github.com/kubeflow/pipelines/pull/871) ([gaoning777](https://github.com/gaoning777))
+- Fix bugs in dataflow component [\#870](https://github.com/kubeflow/pipelines/pull/870) ([hongye-sun](https://github.com/hongye-sun))
+- fix bunch of issues in prow test [\#866](https://github.com/kubeflow/pipelines/pull/866) ([IronPan](https://github.com/IronPan))
+- Refactor MLEngine code and add deploy and set\_default commands [\#864](https://github.com/kubeflow/pipelines/pull/864) ([hongye-sun](https://github.com/hongye-sun))
+- add sigint sigterm to the test [\#863](https://github.com/kubeflow/pipelines/pull/863) ([gaoning777](https://github.com/gaoning777))
+- apiserver glog uses -alsologtostderr [\#859](https://github.com/kubeflow/pipelines/pull/859) ([ywskycn](https://github.com/ywskycn))
+- Added a README for Getting Started with the Pipelines CLI [\#858](https://github.com/kubeflow/pipelines/pull/858) ([swiftdiaries](https://github.com/swiftdiaries))
+- link anchor text nit [\#853](https://github.com/kubeflow/pipelines/pull/853) ([ryan-williams](https://github.com/ryan-williams))
+- Allow users to specify namespace for Kubernetes informers [\#851](https://github.com/kubeflow/pipelines/pull/851) ([ywskycn](https://github.com/ywskycn))
+- Fixing grammatical errors and references [\#835](https://github.com/kubeflow/pipelines/pull/835) ([animeshsingh](https://github.com/animeshsingh))
+- Adds a modifier function to simplify addition of local volumes to containerop [\#783](https://github.com/kubeflow/pipelines/pull/783) ([swiftdiaries](https://github.com/swiftdiaries))
+- add katib studyjob launcher [\#754](https://github.com/kubeflow/pipelines/pull/754) ([hougangliu](https://github.com/hougangliu))
+
+## [0.1.11](https://github.com/kubeflow/pipelines/tree/0.1.11) (2019-02-27)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.10...0.1.11)
+
+**Closed issues:**
+
+- ROADMAP for pipelines [\#675](https://github.com/kubeflow/pipelines/issues/675)
+- python components don't work for images with python 2 [\#666](https://github.com/kubeflow/pipelines/issues/666)
+- dsl PipelineParam does not work under Image or Command [\#521](https://github.com/kubeflow/pipelines/issues/521)
+
+**Merged pull requests:**
+
+- fix bug in the gcp oob [\#861](https://github.com/kubeflow/pipelines/pull/861) ([gaoning777](https://github.com/gaoning777))
+- fix typo in cloud build [\#860](https://github.com/kubeflow/pipelines/pull/860) ([hongye-sun](https://github.com/hongye-sun))
+- add attrs and pyrsistent [\#857](https://github.com/kubeflow/pipelines/pull/857) ([gaoning777](https://github.com/gaoning777))
+- Removes accidental package from TFDV Dockerfile [\#856](https://github.com/kubeflow/pipelines/pull/856) ([rileyjbauer](https://github.com/rileyjbauer))
+- freeze tfdv dependency [\#852](https://github.com/kubeflow/pipelines/pull/852) ([gaoning777](https://github.com/gaoning777))
+- release new components: deployer [\#849](https://github.com/kubeflow/pipelines/pull/849) ([gaoning777](https://github.com/gaoning777))
+- Move e2e tests to us-east1 [\#847](https://github.com/kubeflow/pipelines/pull/847) ([hongye-sun](https://github.com/hongye-sun))
+- ROADMAP.md cosmetic changes [\#846](https://github.com/kubeflow/pipelines/pull/846) ([rileyjbauer](https://github.com/rileyjbauer))
+- Support rendering recursive static DAGs [\#845](https://github.com/kubeflow/pipelines/pull/845) ([rileyjbauer](https://github.com/rileyjbauer))
+- Releasing component SDK [\#842](https://github.com/kubeflow/pipelines/pull/842) ([hongye-sun](https://github.com/hongye-sun))
+- extract the pipelineparam deserialize function [\#841](https://github.com/kubeflow/pipelines/pull/841) ([gaoning777](https://github.com/gaoning777))
+- Update change log for v0.1.10 release [\#840](https://github.com/kubeflow/pipelines/pull/840) ([hongye-sun](https://github.com/hongye-sun))
+- Update swagger codegen version [\#839](https://github.com/kubeflow/pipelines/pull/839) ([hongye-sun](https://github.com/hongye-sun))
+- Add owner file under component\_sdk. [\#838](https://github.com/kubeflow/pipelines/pull/838) ([hongye-sun](https://github.com/hongye-sun))
+- add container image uris support in train API [\#837](https://github.com/kubeflow/pipelines/pull/837) ([hongye-sun](https://github.com/hongye-sun))
+- Support BigQuery command to query and export to GCS [\#836](https://github.com/kubeflow/pipelines/pull/836) ([hongye-sun](https://github.com/hongye-sun))
+- Dataflow SDK to support launch beam python code or template [\#833](https://github.com/kubeflow/pipelines/pull/833) ([hongye-sun](https://github.com/hongye-sun))
+- Sets 'Choose file' button width to avoid wrap [\#830](https://github.com/kubeflow/pipelines/pull/830) ([rileyjbauer](https://github.com/rileyjbauer))
+- Update graph styling [\#829](https://github.com/kubeflow/pipelines/pull/829) ([rileyjbauer](https://github.com/rileyjbauer))
+- fix apache-beam and pandas versions in tfdv image [\#828](https://github.com/kubeflow/pipelines/pull/828) ([gaoning777](https://github.com/gaoning777))
+- Enhance hard code for export dir in deploy component [\#823](https://github.com/kubeflow/pipelines/pull/823) ([jinchihe](https://github.com/jinchihe))
+- Components - Added the "Keras - Train classifier" component [\#809](https://github.com/kubeflow/pipelines/pull/809) ([Ark-kun](https://github.com/Ark-kun))
+- Training and Serving Pipeline leveraging WML [\#800](https://github.com/kubeflow/pipelines/pull/800) ([animeshsingh](https://github.com/animeshsingh))
+- MLEngine Commands Implementation [\#773](https://github.com/kubeflow/pipelines/pull/773) ([hongye-sun](https://github.com/hongye-sun))
+- Cleanup pipeline bootstrapper [\#770](https://github.com/kubeflow/pipelines/pull/770) ([ohmystack](https://github.com/ohmystack))
+- component build support for both python2 and python3 [\#730](https://github.com/kubeflow/pipelines/pull/730) ([gaoning777](https://github.com/gaoning777))
+- Add postsubmit component test [\#613](https://github.com/kubeflow/pipelines/pull/613) ([gaoning777](https://github.com/gaoning777))
+
+## [0.1.10](https://github.com/kubeflow/pipelines/tree/0.1.10) (2019-02-15)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.9...0.1.10)
+
+**Closed issues:**
+
+- Pipeline Release V0.1.10 [\#805](https://github.com/kubeflow/pipelines/issues/805)
+- Failed to create recurring run [\#802](https://github.com/kubeflow/pipelines/issues/802)
+- Persistent Agent doesn't like v1beta1 scheduledworkflow in on-prem scenario [\#790](https://github.com/kubeflow/pipelines/issues/790)
+- Metric bars do not always show up correctly [\#775](https://github.com/kubeflow/pipelines/issues/775)
+- ml-pipeline-persistenceagent restarts forever [\#741](https://github.com/kubeflow/pipelines/issues/741)
+- 403 Insufficient Permission error when writing to GCS [\#709](https://github.com/kubeflow/pipelines/issues/709)
+- Support archiving/unarchiving runs on the UI [\#621](https://github.com/kubeflow/pipelines/issues/621)
+- Incorrect validation in new run form when filling name then pipeline [\#612](https://github.com/kubeflow/pipelines/issues/612)
+
+**Merged pull requests:**
+
+- Fixed validation in new run form when filling name then pipeline [\#826](https://github.com/kubeflow/pipelines/pull/826) ([elviraux](https://github.com/elviraux))
+- Adds a local volume sample for the Viewer CRD [\#822](https://github.com/kubeflow/pipelines/pull/822) ([swiftdiaries](https://github.com/swiftdiaries))
+- Revert dataflow component version in samples to unblock release [\#817](https://github.com/kubeflow/pipelines/pull/817) ([hongye-sun](https://github.com/hongye-sun))
+- Sample updates for release v0.1.10 [\#816](https://github.com/kubeflow/pipelines/pull/816) ([hongye-sun](https://github.com/hongye-sun))
+- Improve docker image build perf by leveraging docker cache [\#815](https://github.com/kubeflow/pipelines/pull/815) ([hongye-sun](https://github.com/hongye-sun))
+- Increase disk size and use high CPU machine type in cloud build [\#813](https://github.com/kubeflow/pipelines/pull/813) ([hongye-sun](https://github.com/hongye-sun))
+- add ViewerCrdController to cloud builder release spec [\#811](https://github.com/kubeflow/pipelines/pull/811) ([IronPan](https://github.com/IronPan))
+- add pyarrow in license csv. [\#810](https://github.com/kubeflow/pipelines/pull/810) ([hongye-sun](https://github.com/hongye-sun))
+- switch test to us-west1 [\#808](https://github.com/kubeflow/pipelines/pull/808) ([IronPan](https://github.com/IronPan))
+- add trainer license to unblock release [\#806](https://github.com/kubeflow/pipelines/pull/806) ([hongye-sun](https://github.com/hongye-sun))
+- Updates lodash to version 4.17.11 [\#803](https://github.com/kubeflow/pipelines/pull/803) ([rileyjbauer](https://github.com/rileyjbauer))
+- merge build image to test suite [\#799](https://github.com/kubeflow/pipelines/pull/799) ([IronPan](https://github.com/IronPan))
+- Update changelog for release 0.19 [\#797](https://github.com/kubeflow/pipelines/pull/797) ([neuromage](https://github.com/neuromage))
+- Update Basic Pipeline example notebook [\#782](https://github.com/kubeflow/pipelines/pull/782) ([swiftdiaries](https://github.com/swiftdiaries))
+- Support to send default service account jwt token for pipeline client. [\#779](https://github.com/kubeflow/pipelines/pull/779) ([hongye-sun](https://github.com/hongye-sun))
+- Updates frontend to Typescript to 3.3.1 [\#772](https://github.com/kubeflow/pipelines/pull/772) ([rileyjbauer](https://github.com/rileyjbauer))
+- update developer guide to renew deployment section [\#771](https://github.com/kubeflow/pipelines/pull/771) ([jinchihe](https://github.com/jinchihe))
+- Add Launcher module in component SDK [\#769](https://github.com/kubeflow/pipelines/pull/769) ([hongye-sun](https://github.com/hongye-sun))
+- Archive runs UI [\#748](https://github.com/kubeflow/pipelines/pull/748) ([yebrahim](https://github.com/yebrahim))
+- placeholder for ibm sample pipelines and corresponding components [\#731](https://github.com/kubeflow/pipelines/pull/731) ([animeshsingh](https://github.com/animeshsingh))
+
+## [0.1.9](https://github.com/kubeflow/pipelines/tree/0.1.9) (2019-02-06)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.8...0.1.9)
+
+**Closed issues:**
+
+- XGBoost sample failure [\#736](https://github.com/kubeflow/pipelines/issues/736)
+- TFX taxi client utility run abnormally after serving started [\#728](https://github.com/kubeflow/pipelines/issues/728)
+- Pipeline deploy component does not support local model [\#726](https://github.com/kubeflow/pipelines/issues/726)
+- Failed to execute the component roc and confusion\_matrix in TFX sample for local mode [\#719](https://github.com/kubeflow/pipelines/issues/719)
+- An error occurs when run TFX example in local kubeflow cluster [\#703](https://github.com/kubeflow/pipelines/issues/703)
+- Not using secret when pulling from private Registry [\#695](https://github.com/kubeflow/pipelines/issues/695)
+- Release 1.7 - TFX taxi cab example failing the deploy step [\#692](https://github.com/kubeflow/pipelines/issues/692)
+- Run details page should autorefresh [\#683](https://github.com/kubeflow/pipelines/issues/683)
+- problem when deploying kubeflow 0.4.0 [\#676](https://github.com/kubeflow/pipelines/issues/676)
+- Do I need to be running kubeflow on GCP to use pipelines? [\#661](https://github.com/kubeflow/pipelines/issues/661)
+- Design doc for pipelines [\#569](https://github.com/kubeflow/pipelines/issues/569)
+- Some error logs in sample for ML - TFX - Taxi Tip Prediction Model Trainer [\#547](https://github.com/kubeflow/pipelines/issues/547)
+- Return the total number of resources in list APIs [\#103](https://github.com/kubeflow/pipelines/issues/103)
+
+**Merged pull requests:**
+
+- Creating ROADMAP.md [\#786](https://github.com/kubeflow/pipelines/pull/786) ([paveldournov](https://github.com/paveldournov))
+- Revert GPU tests to unblock release [\#778](https://github.com/kubeflow/pipelines/pull/778) ([hongye-sun](https://github.com/hongye-sun))
+- Modifications done by the execution of the release script. [\#777](https://github.com/kubeflow/pipelines/pull/777) ([vicaire](https://github.com/vicaire))
+- Refactor BaseOp to context manager pattern [\#762](https://github.com/kubeflow/pipelines/pull/762) ([hongye-sun](https://github.com/hongye-sun))
+- Add component\_sdk tests to travis tests [\#761](https://github.com/kubeflow/pipelines/pull/761) ([hongye-sun](https://github.com/hongye-sun))
+- Fix Dockerfile for viewer CRD image. [\#760](https://github.com/kubeflow/pipelines/pull/760) ([neuromage](https://github.com/neuromage))
+- Add UI actions to Buttons module [\#758](https://github.com/kubeflow/pipelines/pull/758) ([yebrahim](https://github.com/yebrahim))
+- Regenerate frontend API files now that listCount APIs are merged [\#757](https://github.com/kubeflow/pipelines/pull/757) ([rileyjbauer](https://github.com/rileyjbauer))
+- Enhance deploy.sh to support local storage [\#755](https://github.com/kubeflow/pipelines/pull/755) ([jinchihe](https://github.com/jinchihe))
+- Releasing components fixing xgboost [\#747](https://github.com/kubeflow/pipelines/pull/747) ([gaoning777](https://github.com/gaoning777))
+- support pipeline level imagepullsecret in DSL [\#745](https://github.com/kubeflow/pipelines/pull/745) ([gaoning777](https://github.com/gaoning777))
+- Updated dev guide for apiserver build with minikube. [\#743](https://github.com/kubeflow/pipelines/pull/743) ([gyliu513](https://github.com/gyliu513))
+- Add Dockerfile for building Viewer CRD controller. [\#740](https://github.com/kubeflow/pipelines/pull/740) ([neuromage](https://github.com/neuromage))
+- Remove pipeline bootstrapper [\#739](https://github.com/kubeflow/pipelines/pull/739) ([IronPan](https://github.com/IronPan))
+- Refactor UI buttons to lib file [\#737](https://github.com/kubeflow/pipelines/pull/737) ([yebrahim](https://github.com/yebrahim))
+- fix dataproc cluster version [\#735](https://github.com/kubeflow/pipelines/pull/735) ([gaoning777](https://github.com/gaoning777))
+- Improve runtime graph starting and running experience [\#734](https://github.com/kubeflow/pipelines/pull/734) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK/Components - convert\_object\_to\_struct now uses \_\_init\_\_ to get field list [\#733](https://github.com/kubeflow/pipelines/pull/733) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Improved error when type checking fails in constructor [\#732](https://github.com/kubeflow/pipelines/pull/732) ([Ark-kun](https://github.com/Ark-kun))
+- Create kfp component sdk [\#729](https://github.com/kubeflow/pipelines/pull/729) ([hongye-sun](https://github.com/hongye-sun))
+- Add CMLE sample test script. [\#724](https://github.com/kubeflow/pipelines/pull/724) ([qimingj](https://github.com/qimingj))
+- Add "set\_retry\(\)" on ContainerOp. [\#723](https://github.com/kubeflow/pipelines/pull/723) ([qimingj](https://github.com/qimingj))
+- Auto-refreshes the run details page [\#722](https://github.com/kubeflow/pipelines/pull/722) ([rileyjbauer](https://github.com/rileyjbauer))
+- create nonexistent directory for roc and confusion\_matrix component [\#720](https://github.com/kubeflow/pipelines/pull/720) ([jinchihe](https://github.com/jinchihe))
+- SDK/Components/Python - Improved Python2 compatibility [\#718](https://github.com/kubeflow/pipelines/pull/718) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components/Python - Made the typing.NamedTuple import optional [\#717](https://github.com/kubeflow/pipelines/pull/717) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Added \_naming.\_convert\_to\_human\_name function [\#715](https://github.com/kubeflow/pipelines/pull/715) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Command line args can only be strings or placeholders [\#711](https://github.com/kubeflow/pipelines/pull/711) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - More meaningful error when trying to convert graph component to ContainerOp [\#710](https://github.com/kubeflow/pipelines/pull/710) ([Ark-kun](https://github.com/Ark-kun))
+- randomize the deployment name to avoid collision [\#704](https://github.com/kubeflow/pipelines/pull/704) ([gaoning777](https://github.com/gaoning777))
+- SDK - Removed stray debug print from tests [\#701](https://github.com/kubeflow/pipelines/pull/701) ([Ark-kun](https://github.com/Ark-kun))
+- Update changelog for release 0.1.8 [\#697](https://github.com/kubeflow/pipelines/pull/697) ([yebrahim](https://github.com/yebrahim))
+- Add gpu pool to test deployment and enable gpu in sample test [\#696](https://github.com/kubeflow/pipelines/pull/696) ([hongye-sun](https://github.com/hongye-sun))
+- bump ks version in presubmit test [\#693](https://github.com/kubeflow/pipelines/pull/693) ([IronPan](https://github.com/IronPan))
+- SDK - Update minimum Python version to 3.5.3 [\#691](https://github.com/kubeflow/pipelines/pull/691) ([Ark-kun](https://github.com/Ark-kun))
+- Update all Pipelines CRD versions to v1beta1. [\#681](https://github.com/kubeflow/pipelines/pull/681) ([neuromage](https://github.com/neuromage))
+- Return resource count from ListXXX calls [\#595](https://github.com/kubeflow/pipelines/pull/595) ([yebrahim](https://github.com/yebrahim))
+
+## [0.1.8](https://github.com/kubeflow/pipelines/tree/0.1.8) (2019-01-17)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.7...0.1.8)
+
+**Closed issues:**
+
+- Docs about how to write mlpipeline-ui-metadata and mlpipeline-metrics artifacts [\#660](https://github.com/kubeflow/pipelines/issues/660)
+- Pipeline run created from python shows experiment but not pipeline \(in UI\). [\#617](https://github.com/kubeflow/pipelines/issues/617)
+- Load samples returns an error in 0.4 Kubeflow [\#603](https://github.com/kubeflow/pipelines/issues/603)
+- Error deploying latest bootstrapper [\#594](https://github.com/kubeflow/pipelines/issues/594)
+- Provide argument to assign GCP service account to use for DSL operator [\#218](https://github.com/kubeflow/pipelines/issues/218)
+- Run status tooltip should include creation time [\#61](https://github.com/kubeflow/pipelines/issues/61)
+
+**Merged pull requests:**
+
+- Components release 2ed60100d1db9efeb38c6c358f90b21c144179be [\#694](https://github.com/kubeflow/pipelines/pull/694) ([yebrahim](https://github.com/yebrahim))
+- add gcp credential for tf-job template [\#689](https://github.com/kubeflow/pipelines/pull/689) ([IronPan](https://github.com/IronPan))
+- improve the list run query [\#687](https://github.com/kubeflow/pipelines/pull/687) ([IronPan](https://github.com/IronPan))
+- Adds a link in the side nav to the KF 'usage-reporting' doc [\#682](https://github.com/kubeflow/pipelines/pull/682) ([rileyjbauer](https://github.com/rileyjbauer))
+- Fix Makefile to add licenses using Go modules. [\#674](https://github.com/kubeflow/pipelines/pull/674) ([neuromage](https://github.com/neuromage))
+- Include date in run status tooltips [\#671](https://github.com/kubeflow/pipelines/pull/671) ([rileyjbauer](https://github.com/rileyjbauer))
+- Add build version to side nav [\#670](https://github.com/kubeflow/pipelines/pull/670) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK/Tests - Fixed the output ordering instability in tests [\#664](https://github.com/kubeflow/pipelines/pull/664) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Added /data to the generated file paths [\#663](https://github.com/kubeflow/pipelines/pull/663) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Simplified \_create\_task\_factory\_from\_component\_spec function [\#662](https://github.com/kubeflow/pipelines/pull/662) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Renamed serialized command-line argument tokens [\#659](https://github.com/kubeflow/pipelines/pull/659) ([Ark-kun](https://github.com/Ark-kun))
+- update change log [\#657](https://github.com/kubeflow/pipelines/pull/657) ([IronPan](https://github.com/IronPan))
+- Add simple filtering by name to CustomTable [\#656](https://github.com/kubeflow/pipelines/pull/656) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK/Components - Reworked the component model structures. [\#642](https://github.com/kubeflow/pipelines/pull/642) ([Ark-kun](https://github.com/Ark-kun))
+- Support filtering on storage state [\#629](https://github.com/kubeflow/pipelines/pull/629) ([yebrahim](https://github.com/yebrahim))
+- SDK - Fixed the unittest script [\#556](https://github.com/kubeflow/pipelines/pull/556) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.7](https://github.com/kubeflow/pipelines/tree/0.1.7) (2019-01-09)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.6...0.1.7)
+
+**Closed issues:**
+
+- Use "Start" for runs, "Create" for experiments [\#649](https://github.com/kubeflow/pipelines/issues/649)
+- go vet in Travis tests breaks tests for unrelated PRs [\#646](https://github.com/kubeflow/pipelines/issues/646)
+- Building backend using Bazel fails on mac [\#638](https://github.com/kubeflow/pipelines/issues/638)
+- ml-pipeline-persistenceagent fails a few times. [\#624](https://github.com/kubeflow/pipelines/issues/624)
+- Unable to plug-in default values to Pipeline [\#618](https://github.com/kubeflow/pipelines/issues/618)
+- Add deployed model cleanup code to the Kubeflow notebook [\#608](https://github.com/kubeflow/pipelines/issues/608)
+- Compare page perf optimizations [\#597](https://github.com/kubeflow/pipelines/issues/597)
+- OAuth client instructions are ambiguous [\#586](https://github.com/kubeflow/pipelines/issues/586)
+- "Waiting for the IAP setup to get ready..." after clicking "Skip IAP" [\#585](https://github.com/kubeflow/pipelines/issues/585)
+- UI should allow creating a run with no experiment [\#573](https://github.com/kubeflow/pipelines/issues/573)
+- studyjob-controller start failed [\#546](https://github.com/kubeflow/pipelines/issues/546)
+- Failing e2e sample tests do not log any errors [\#515](https://github.com/kubeflow/pipelines/issues/515)
+- Authentication and service account plan for Pipeline + Kubeflow [\#374](https://github.com/kubeflow/pipelines/issues/374)
+- Error getting logs [\#290](https://github.com/kubeflow/pipelines/issues/290)
+- Support filtering in list APIs [\#270](https://github.com/kubeflow/pipelines/issues/270)
+- Switch to Go 1.11 modules and package management. [\#187](https://github.com/kubeflow/pipelines/issues/187)
+
+**Merged pull requests:**
+
+- Release component image version d3c4add0a95e930c70a330466d0923827784eb9a [\#655](https://github.com/kubeflow/pipelines/pull/655) ([IronPan](https://github.com/IronPan))
+- Correctly ignore src/apis when building frontend [\#654](https://github.com/kubeflow/pipelines/pull/654) ([yebrahim](https://github.com/yebrahim))
+- Use linguist annotations to skip diffing generated files [\#652](https://github.com/kubeflow/pipelines/pull/652) ([yebrahim](https://github.com/yebrahim))
+- Use "create" rather than "start" except when initiating a run [\#650](https://github.com/kubeflow/pipelines/pull/650) ([rileyjbauer](https://github.com/rileyjbauer))
+- Fix shadowing errors in Viewer reconciler [\#648](https://github.com/kubeflow/pipelines/pull/648) ([neuromage](https://github.com/neuromage))
+- Add IS\_SUBSTRING operator for use in API resource filtering. [\#645](https://github.com/kubeflow/pipelines/pull/645) ([neuromage](https://github.com/neuromage))
+- Add changelog to pipeline repo [\#644](https://github.com/kubeflow/pipelines/pull/644) ([IronPan](https://github.com/IronPan))
+- Update WORKSPACE and BUILD files incorporating recent changes [\#639](https://github.com/kubeflow/pipelines/pull/639) ([neuromage](https://github.com/neuromage))
+- fix deploy model name conflict in case of concurrent notebook sample test [\#636](https://github.com/kubeflow/pipelines/pull/636) ([gaoning777](https://github.com/gaoning777))
+- Print sample test logs in case of exception throw [\#635](https://github.com/kubeflow/pipelines/pull/635) ([gaoning777](https://github.com/gaoning777))
+- Expose pipeline/job API through setup.py [\#634](https://github.com/kubeflow/pipelines/pull/634) ([IronPan](https://github.com/IronPan))
+- Fix retrying logic which was causing persistenceagent to crash loop. [\#633](https://github.com/kubeflow/pipelines/pull/633) ([neuromage](https://github.com/neuromage))
+- Add resnet-cmle sample back. Update all component images. [\#632](https://github.com/kubeflow/pipelines/pull/632) ([qimingj](https://github.com/qimingj))
+- Updates material-ui and react npm libraries [\#630](https://github.com/kubeflow/pipelines/pull/630) ([rileyjbauer](https://github.com/rileyjbauer))
+- Generate pipeline and job python client as part of SDK [\#628](https://github.com/kubeflow/pipelines/pull/628) ([IronPan](https://github.com/IronPan))
+- Fix gpu sample issues [\#627](https://github.com/kubeflow/pipelines/pull/627) ([hongye-sun](https://github.com/hongye-sun))
+- Run `go vet` as part of the Travis CI. [\#626](https://github.com/kubeflow/pipelines/pull/626) ([neuromage](https://github.com/neuromage))
+- Sanity check filtering/sorting options in list requests. [\#625](https://github.com/kubeflow/pipelines/pull/625) ([neuromage](https://github.com/neuromage))
+- Support replaceable arguments in command as well \(besides arguments\) in container op. [\#623](https://github.com/kubeflow/pipelines/pull/623) ([qimingj](https://github.com/qimingj))
+- Update sample notebook to clean up deployed models. [\#622](https://github.com/kubeflow/pipelines/pull/622) ([qimingj](https://github.com/qimingj))
+- URLEncode instead of base64 encode the filter string [\#620](https://github.com/kubeflow/pipelines/pull/620) ([neuromage](https://github.com/neuromage))
+- DSL refactor [\#619](https://github.com/kubeflow/pipelines/pull/619) ([gaoning777](https://github.com/gaoning777))
+- Expose that the python API is Python3.5+ only [\#616](https://github.com/kubeflow/pipelines/pull/616) ([TimZaman](https://github.com/TimZaman))
+- Load sample when pipeline initially started [\#615](https://github.com/kubeflow/pipelines/pull/615) ([IronPan](https://github.com/IronPan))
+- Use Bazel to build the entire backend and perform API code generation [\#609](https://github.com/kubeflow/pipelines/pull/609) ([neuromage](https://github.com/neuromage))
+- Improve condition sample to demonstrate ==, \>= and \<. [\#607](https://github.com/kubeflow/pipelines/pull/607) ([qimingj](https://github.com/qimingj))
+- fix for bootstrapper problem [\#602](https://github.com/kubeflow/pipelines/pull/602) ([xiaozhouX](https://github.com/xiaozhouX))
+- Initial version of BigQuery query execution component. [\#601](https://github.com/kubeflow/pipelines/pull/601) ([cbreuel](https://github.com/cbreuel))
+- First step to bring back CMLE sample. [\#599](https://github.com/kubeflow/pipelines/pull/599) ([qimingj](https://github.com/qimingj))
+- Compare perf - pure components, disable ROC curve thumbnail animations [\#598](https://github.com/kubeflow/pipelines/pull/598) ([yebrahim](https://github.com/yebrahim))
+- Move backend unit tests to Travis [\#589](https://github.com/kubeflow/pipelines/pull/589) ([yebrahim](https://github.com/yebrahim))
+- Deployment - Minikube support - Passing the platform parameter to kfctl [\#588](https://github.com/kubeflow/pipelines/pull/588) ([Ark-kun](https://github.com/Ark-kun))
+- Fix the List run to get all runs [\#583](https://github.com/kubeflow/pipelines/pull/583) ([IronPan](https://github.com/IronPan))
+- retry on create table in api server [\#582](https://github.com/kubeflow/pipelines/pull/582) ([IronPan](https://github.com/IronPan))
+- switch from go dep to go module [\#581](https://github.com/kubeflow/pipelines/pull/581) ([IronPan](https://github.com/IronPan))
+- Add sample test without image build [\#578](https://github.com/kubeflow/pipelines/pull/578) ([gaoning777](https://github.com/gaoning777))
+- remove xgboost compiled sample [\#576](https://github.com/kubeflow/pipelines/pull/576) ([gaoning777](https://github.com/gaoning777))
+- Add a gpu sample [\#575](https://github.com/kubeflow/pipelines/pull/575) ([hongye-sun](https://github.com/hongye-sun))
+- Backend - Removed hardcoded metrics file name [\#574](https://github.com/kubeflow/pipelines/pull/574) ([Ark-kun](https://github.com/Ark-kun))
+- update dockerfile and add build step of frontend [\#567](https://github.com/kubeflow/pipelines/pull/567) ([chenzhiwei](https://github.com/chenzhiwei))
+- Encode filter parameter as a base64-encoded JSON string in List requests [\#563](https://github.com/kubeflow/pipelines/pull/563) ([neuromage](https://github.com/neuromage))
+- Tests - Updated image-builder Makefile [\#500](https://github.com/kubeflow/pipelines/pull/500) ([Ark-kun](https://github.com/Ark-kun))
+- Add the Viewer CRD controller for managing web views such as Tensorboard instances from within the Pipelines UI. [\#449](https://github.com/kubeflow/pipelines/pull/449) ([neuromage](https://github.com/neuromage))
+
+## [0.1.6](https://github.com/kubeflow/pipelines/tree/0.1.6) (2018-12-20)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.5...0.1.6)
+
+**Closed issues:**
+
+- Support creating runs without experiments [\#560](https://github.com/kubeflow/pipelines/issues/560)
+
+**Merged pull requests:**
+
+- Allow creating runs with no experiments [\#572](https://github.com/kubeflow/pipelines/pull/572) ([yebrahim](https://github.com/yebrahim))
+- Add script for component image release [\#571](https://github.com/kubeflow/pipelines/pull/571) ([IronPan](https://github.com/IronPan))
+
+## [0.1.5](https://github.com/kubeflow/pipelines/tree/0.1.5) (2018-12-20)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.4...0.1.5)
+
+**Closed issues:**
+
+- remove duplicate volumes in the DSL [\#557](https://github.com/kubeflow/pipelines/issues/557)
+- Cut a Pipeline v0.1.5 release [\#549](https://github.com/kubeflow/pipelines/issues/549)
+- "Information in the Argo UI appears much faster compared to the KF Pipelines UI" [\#544](https://github.com/kubeflow/pipelines/issues/544)
+- KubeFlow Pipeline example notebook is half way updated for 0.1.4 [\#520](https://github.com/kubeflow/pipelines/issues/520)
+- UI loses state after being idle for a while [\#502](https://github.com/kubeflow/pipelines/issues/502)
+- No way in the UI to upload a pipeline from the cluster itself [\#495](https://github.com/kubeflow/pipelines/issues/495)
+- Confusion Matrix UI Doesn't Work if My Labels are "True", "False" [\#446](https://github.com/kubeflow/pipelines/issues/446)
+- input parameter for creating pipelines - does not allow camelCase [\#445](https://github.com/kubeflow/pipelines/issues/445)
+- Pipeline install timeout [\#414](https://github.com/kubeflow/pipelines/issues/414)
+- 'Run output' tab for Run details page [\#331](https://github.com/kubeflow/pipelines/issues/331)
+- Create a container wrapper op with Default GCP auth [\#310](https://github.com/kubeflow/pipelines/issues/310)
+- Test/Server code out of sync, blocking PRs [\#196](https://github.com/kubeflow/pipelines/issues/196)
+- Show results of the entire workflow in one view [\#92](https://github.com/kubeflow/pipelines/issues/92)
+- User should be able to start a new run from the pipeline page. [\#53](https://github.com/kubeflow/pipelines/issues/53)
+
+**Merged pull requests:**
+
+- Refactor Python SDK [\#568](https://github.com/kubeflow/pipelines/pull/568) ([gaoning777](https://github.com/gaoning777))
+- Fix XGB Sample with the new ROC component. [\#565](https://github.com/kubeflow/pipelines/pull/565) ([qimingj](https://github.com/qimingj))
+- Fix frontend mock data after proto changes [\#564](https://github.com/kubeflow/pipelines/pull/564) ([yebrahim](https://github.com/yebrahim))
+- update dockerfile and build steps [\#562](https://github.com/kubeflow/pipelines/pull/562) ([chenzhiwei](https://github.com/chenzhiwei))
+- Fix ROC Component [\#559](https://github.com/kubeflow/pipelines/pull/559) ([qimingj](https://github.com/qimingj))
+- remove duplicate volumes [\#558](https://github.com/kubeflow/pipelines/pull/558) ([gaoning777](https://github.com/gaoning777))
+- Enables uploading a pipeline via a URL [\#554](https://github.com/kubeflow/pipelines/pull/554) ([rileyjbauer](https://github.com/rileyjbauer))
+- Fixes await bug in create new run [\#553](https://github.com/kubeflow/pipelines/pull/553) ([rileyjbauer](https://github.com/rileyjbauer))
+- Support archiving/unarchiving runs on the backend [\#552](https://github.com/kubeflow/pipelines/pull/552) ([yebrahim](https://github.com/yebrahim))
+- Parameterize mysql and minio image [\#551](https://github.com/kubeflow/pipelines/pull/551) ([IronPan](https://github.com/IronPan))
+- Fix sample test failure [\#548](https://github.com/kubeflow/pipelines/pull/548) ([gaoning777](https://github.com/gaoning777))
+- add two necessary licenses [\#545](https://github.com/kubeflow/pipelines/pull/545) ([gaoning777](https://github.com/gaoning777))
+- update test to specify name when create pipeline [\#543](https://github.com/kubeflow/pipelines/pull/543) ([IronPan](https://github.com/IronPan))
+- Restructure dataproc components [\#542](https://github.com/kubeflow/pipelines/pull/542) ([gaoning777](https://github.com/gaoning777))
+- Hides TaskGroup nodes from runtime graphs and removes unnecessary edges in static graphs [\#541](https://github.com/kubeflow/pipelines/pull/541) ([rileyjbauer](https://github.com/rileyjbauer))
+- Stops k8s-helper from throwing error when a pod has no logs [\#540](https://github.com/kubeflow/pipelines/pull/540) ([rileyjbauer](https://github.com/rileyjbauer))
+- fix persistence agent to use in cluster DNS instead of kube proxy to access API [\#538](https://github.com/kubeflow/pipelines/pull/538) ([IronPan](https://github.com/IronPan))
+- Add filtering ability for all backend API ListXXX requests [\#537](https://github.com/kubeflow/pipelines/pull/537) ([neuromage](https://github.com/neuromage))
+- Samples - Moved secret application to the pipeline definition [\#536](https://github.com/kubeflow/pipelines/pull/536) ([Ark-kun](https://github.com/Ark-kun))
+- Support Kaniko job in an outside-cluster jupyter. [\#535](https://github.com/kubeflow/pipelines/pull/535) ([qimingj](https://github.com/qimingj))
+- Add hongye-sun to OWNERS of samples, components, and sdk directories. [\#531](https://github.com/kubeflow/pipelines/pull/531) ([qimingj](https://github.com/qimingj))
+- Support GPU image for dnntrainer component [\#530](https://github.com/kubeflow/pipelines/pull/530) ([hongye-sun](https://github.com/hongye-sun))
+- Change "namespace" param to "host" in kfp client so we can use Jupyter outside cluster. [\#529](https://github.com/kubeflow/pipelines/pull/529) ([qimingj](https://github.com/qimingj))
+- Skip backend integration tests when cli flag isn't passed [\#527](https://github.com/kubeflow/pipelines/pull/527) ([yebrahim](https://github.com/yebrahim))
+- Remove the workaround in confusion matrix component for correctly handling boolean cases. [\#526](https://github.com/kubeflow/pipelines/pull/526) ([qimingj](https://github.com/qimingj))
+- Regenerate swagger APIs [\#524](https://github.com/kubeflow/pipelines/pull/524) ([rileyjbauer](https://github.com/rileyjbauer))
+- Add metric and ui-metadata samples [\#523](https://github.com/kubeflow/pipelines/pull/523) ([hongye-sun](https://github.com/hongye-sun))
+- bump up the sdk version to 0.1.4 [\#522](https://github.com/kubeflow/pipelines/pull/522) ([hongye-sun](https://github.com/hongye-sun))
+- SDK/Components - Added the ComponentStore [\#519](https://github.com/kubeflow/pipelines/pull/519) ([Ark-kun](https://github.com/Ark-kun))
+- Improve TFX Taxi Sample and Components. [\#518](https://github.com/kubeflow/pipelines/pull/518) ([qimingj](https://github.com/qimingj))
+- Improve the notebook TFX sample [\#517](https://github.com/kubeflow/pipelines/pull/517) ([gaoning777](https://github.com/gaoning777))
+- SDK/Components - Do not crash on non-hashable objects [\#511](https://github.com/kubeflow/pipelines/pull/511) ([Ark-kun](https://github.com/Ark-kun))
+- Renamed dsl/\_component.py to dsl/\_python\_component.py [\#510](https://github.com/kubeflow/pipelines/pull/510) ([Ark-kun](https://github.com/Ark-kun))
+- add job to load sample [\#509](https://github.com/kubeflow/pipelines/pull/509) ([IronPan](https://github.com/IronPan))
+- Improve get\_experiment and list\_runs in the python sdk [\#508](https://github.com/kubeflow/pipelines/pull/508) ([gaoning777](https://github.com/gaoning777))
+- Add new run button to pipeline details [\#507](https://github.com/kubeflow/pipelines/pull/507) ([rileyjbauer](https://github.com/rileyjbauer))
+- Add lightweight sample test [\#499](https://github.com/kubeflow/pipelines/pull/499) ([gaoning777](https://github.com/gaoning777))
+- Move pipeline name to request body for create pipeline API [\#498](https://github.com/kubeflow/pipelines/pull/498) ([IronPan](https://github.com/IronPan))
+- Show all run outputs in dedicated tab [\#496](https://github.com/kubeflow/pipelines/pull/496) ([yebrahim](https://github.com/yebrahim))
+- Pin versions of libraries and tools required for proto generation. [\#492](https://github.com/kubeflow/pipelines/pull/492) ([neuromage](https://github.com/neuromage))
+- SDK/Components - Don't fail on dict type specs [\#490](https://github.com/kubeflow/pipelines/pull/490) ([Ark-kun](https://github.com/Ark-kun))
+- Add experiment selector to NewRun [\#486](https://github.com/kubeflow/pipelines/pull/486) ([rileyjbauer](https://github.com/rileyjbauer))
+
+## [0.1.4](https://github.com/kubeflow/pipelines/tree/0.1.4) (2018-12-07)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.3...0.1.4)
+
+**Closed issues:**
+
+- DSL Condition Feature/Bug [\#481](https://github.com/kubeflow/pipelines/issues/481)
+- Persist pod logs to permanent storage [\#439](https://github.com/kubeflow/pipelines/issues/439)
+- Pipeline name does not show up when submitting from SDK [\#347](https://github.com/kubeflow/pipelines/issues/347)
+
+**Merged pull requests:**
+
+- support tpu settings in dsl [\#491](https://github.com/kubeflow/pipelines/pull/491) ([hongye-sun](https://github.com/hongye-sun))
+- Bump default pipeline version to v0.1.3 [\#484](https://github.com/kubeflow/pipelines/pull/484) ([IronPan](https://github.com/IronPan))
+- clean up test - remove unused workflow [\#483](https://github.com/kubeflow/pipelines/pull/483) ([IronPan](https://github.com/IronPan))
+- Add notebook sample test: tfx sample [\#470](https://github.com/kubeflow/pipelines/pull/470) ([gaoning777](https://github.com/gaoning777))
+
+## [0.1.3](https://github.com/kubeflow/pipelines/tree/0.1.3) (2018-12-05)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.3-rc.3...0.1.3)
+
+**Closed issues:**
+
+- Bug in pipeline samples' parameters [\#478](https://github.com/kubeflow/pipelines/issues/478)
+- Frontend integration test is flaky [\#464](https://github.com/kubeflow/pipelines/issues/464)
+- Jupyter Notebook \[tf-hub-0\] run failed [\#458](https://github.com/kubeflow/pipelines/issues/458)
+- AssertionError \[ERR\_ASSERTION\]: logs do not look right: 1 [\#453](https://github.com/kubeflow/pipelines/issues/453)
+- Why can not delete experiments and runs? [\#441](https://github.com/kubeflow/pipelines/issues/441)
+- grant kubeflow user account CMLE permission [\#416](https://github.com/kubeflow/pipelines/issues/416)
+- where is bootstrapper code gcr.io/ml-pipeline/bootstrapper:0.1.2 [\#412](https://github.com/kubeflow/pipelines/issues/412)
+- Compare page tests [\#382](https://github.com/kubeflow/pipelines/issues/382)
+- Pipeline UI should work behind Kubeflow reverse proxy \(Ambassador\) [\#373](https://github.com/kubeflow/pipelines/issues/373)
+- Presubmit tests are broken since we rely on pulling node.js image that was removed [\#370](https://github.com/kubeflow/pipelines/issues/370)
+- Backend ships with very old SDK version [\#358](https://github.com/kubeflow/pipelines/issues/358)
+- Python fire interprets string incorrectly [\#318](https://github.com/kubeflow/pipelines/issues/318)
+- Include kubectl install command in Github page [\#284](https://github.com/kubeflow/pipelines/issues/284)
+- Embeddable run view page [\#182](https://github.com/kubeflow/pipelines/issues/182)
+- Better render pipeline description [\#89](https://github.com/kubeflow/pipelines/issues/89)
+- Support cloning run started from notebook [\#75](https://github.com/kubeflow/pipelines/issues/75)
+- "No runs were found for this experiment" is a misleading message when the list is still being loaded. [\#63](https://github.com/kubeflow/pipelines/issues/63)
+- User should be able to create a new Recurring Run from the pipeline page. [\#54](https://github.com/kubeflow/pipelines/issues/54)
+
+**Merged pull requests:**
+
+- Return string from pod logs [\#476](https://github.com/kubeflow/pipelines/pull/476) ([yebrahim](https://github.com/yebrahim))
+- Fix serializing cloned embedded pipeline [\#474](https://github.com/kubeflow/pipelines/pull/474) ([yebrahim](https://github.com/yebrahim))
+- Refresh while waiting for run to start [\#472](https://github.com/kubeflow/pipelines/pull/472) ([yebrahim](https://github.com/yebrahim))
+- Add article on Jupyter notebooks [\#471](https://github.com/kubeflow/pipelines/pull/471) ([lakshmanok](https://github.com/lakshmanok))
+- Adds tests for the run comparison page [\#469](https://github.com/kubeflow/pipelines/pull/469) ([rileyjbauer](https://github.com/rileyjbauer))
+- Sanitize inputs from SDK when submitting run. [\#466](https://github.com/kubeflow/pipelines/pull/466) ([qimingj](https://github.com/qimingj))
+- Support cloning runs created with an embedded pipeline [\#465](https://github.com/kubeflow/pipelines/pull/465) ([yebrahim](https://github.com/yebrahim))
+- import bug [\#463](https://github.com/kubeflow/pipelines/pull/463) ([gaoning777](https://github.com/gaoning777))
+- Wait until logs appear right in e2e test [\#459](https://github.com/kubeflow/pipelines/pull/459) ([yebrahim](https://github.com/yebrahim))
+- Fix sample test - Add gcp permission [\#454](https://github.com/kubeflow/pipelines/pull/454) ([IronPan](https://github.com/IronPan))
+- Show pipeline details embedded in runs [\#447](https://github.com/kubeflow/pipelines/pull/447) ([yebrahim](https://github.com/yebrahim))
+- SDK/PythonContainer - Compiling pipelines without needing kubernetes [\#442](https://github.com/kubeflow/pipelines/pull/442) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Renamed container.arguments to container.args [\#437](https://github.com/kubeflow/pipelines/pull/437) ([Ark-kun](https://github.com/Ark-kun))
+- DSL - return self for .after\(\) [\#435](https://github.com/kubeflow/pipelines/pull/435) ([IronPan](https://github.com/IronPan))
+- Tests - Try fix GCS file being inaccessible right after upload [\#433](https://github.com/kubeflow/pipelines/pull/433) ([Ark-kun](https://github.com/Ark-kun))
+- Testing - Fix Travis tests \(SDK/Components\) [\#432](https://github.com/kubeflow/pipelines/pull/432) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/GCP - Replaced default\_gcp\_op with task.apply\(use\_gcp\_secret\) [\#430](https://github.com/kubeflow/pipelines/pull/430) ([Ark-kun](https://github.com/Ark-kun))
+- Fix tb viewer test [\#427](https://github.com/kubeflow/pipelines/pull/427) ([yebrahim](https://github.com/yebrahim))
+- SDK/Client - Removed import six [\#425](https://github.com/kubeflow/pipelines/pull/425) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components/PythonContainerOp - Make the local output path configurable [\#424](https://github.com/kubeflow/pipelines/pull/424) ([Ark-kun](https://github.com/Ark-kun))
+- Propagate secret to kaniko [\#423](https://github.com/kubeflow/pipelines/pull/423) ([IronPan](https://github.com/IronPan))
+- fix bug in the notebook sample [\#419](https://github.com/kubeflow/pipelines/pull/419) ([gaoning777](https://github.com/gaoning777))
+- Failed the sample tests when it should [\#417](https://github.com/kubeflow/pipelines/pull/417) ([gaoning777](https://github.com/gaoning777))
+- Fix for k8s dict parsing [\#411](https://github.com/kubeflow/pipelines/pull/411) ([vanpelt](https://github.com/vanpelt))
+- Clean up the resource after test finish [\#410](https://github.com/kubeflow/pipelines/pull/410) ([IronPan](https://github.com/IronPan))
+- Add loading spinner to custom table while loading items [\#405](https://github.com/kubeflow/pipelines/pull/405) ([yebrahim](https://github.com/yebrahim))
+- SDK/Components/PythonContainerOp - Switch from dict to ComponentSpec [\#396](https://github.com/kubeflow/pipelines/pull/396) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Removed outputs from task factory function signature [\#388](https://github.com/kubeflow/pipelines/pull/388) ([Ark-kun](https://github.com/Ark-kun))
+- Add Gopkg dependency for kubernetes code-generator. [\#371](https://github.com/kubeflow/pipelines/pull/371) ([neuromage](https://github.com/neuromage))
+- SDK - Improve the python package build [\#364](https://github.com/kubeflow/pipelines/pull/364) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/DSL - Added support for conditions: !=, \<, \<=, \>=, \> [\#309](https://github.com/kubeflow/pipelines/pull/309) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Support for optional inputs [\#214](https://github.com/kubeflow/pipelines/pull/214) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components/PythonContainerOp - Simplified GCSHelper by extracting duplicate code [\#210](https://github.com/kubeflow/pipelines/pull/210) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.3-rc.3](https://github.com/kubeflow/pipelines/tree/0.1.3-rc.3) (2018-11-29)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.3-rc.2...0.1.3-rc.3)
+
+**Closed issues:**
+
+- Pipeline names are truncated and the columns are not resizable [\#400](https://github.com/kubeflow/pipelines/issues/400)
+- RunDetails page tests [\#383](https://github.com/kubeflow/pipelines/issues/383)
+- ExperimentDetails page tests [\#381](https://github.com/kubeflow/pipelines/issues/381)
+- PipelineDetails page tests [\#379](https://github.com/kubeflow/pipelines/issues/379)
+- Our tests clone our repo 40+ times for each PR commit \(and some PRs have dozens of commits\) [\#368](https://github.com/kubeflow/pipelines/issues/368)
+- Backend image build does not fail when sample compilation fails [\#354](https://github.com/kubeflow/pipelines/issues/354)
+- Back-button not working when creating a run from a notebook [\#221](https://github.com/kubeflow/pipelines/issues/221)
+
+**Merged pull requests:**
+
+- Adds tests for the ExperimentDetails page [\#404](https://github.com/kubeflow/pipelines/pull/404) ([rileyjbauer](https://github.com/rileyjbauer))
+- Removed unused docs directory including OWNERS file and images. [\#401](https://github.com/kubeflow/pipelines/pull/401) ([sarahmaddox](https://github.com/sarahmaddox))
+- Pointed doc links to Kubeflow website instead of wiki. [\#398](https://github.com/kubeflow/pipelines/pull/398) ([sarahmaddox](https://github.com/sarahmaddox))
+- RunDetails test suite, bug fixes [\#394](https://github.com/kubeflow/pipelines/pull/394) ([yebrahim](https://github.com/yebrahim))
+- update image tag with the new release [\#393](https://github.com/kubeflow/pipelines/pull/393) ([gaoning777](https://github.com/gaoning777))
+- Increase timeout waiting for test run to start [\#392](https://github.com/kubeflow/pipelines/pull/392) ([yebrahim](https://github.com/yebrahim))
+- Delete kf\_deploy.sh [\#391](https://github.com/kubeflow/pipelines/pull/391) ([IronPan](https://github.com/IronPan))
+- Clean up vendor directory [\#390](https://github.com/kubeflow/pipelines/pull/390) ([neuromage](https://github.com/neuromage))
+- Add support for minio hosted artifacts [\#389](https://github.com/kubeflow/pipelines/pull/389) ([vanpelt](https://github.com/vanpelt))
+- Backend - Fixed handling of sample compilation failure [\#387](https://github.com/kubeflow/pipelines/pull/387) ([Ark-kun](https://github.com/Ark-kun))
+- Add finish timestamp to backend API interceptor [\#386](https://github.com/kubeflow/pipelines/pull/386) ([yebrahim](https://github.com/yebrahim))
+- remove fire dependency in the component image build [\#384](https://github.com/kubeflow/pipelines/pull/384) ([gaoning777](https://github.com/gaoning777))
+- PipelineDetails page tests [\#380](https://github.com/kubeflow/pipelines/pull/380) ([yebrahim](https://github.com/yebrahim))
+- update samples with dependency requirement in the component image build [\#378](https://github.com/kubeflow/pipelines/pull/378) ([gaoning777](https://github.com/gaoning777))
+- Disables back button when there is no history [\#377](https://github.com/kubeflow/pipelines/pull/377) ([rileyjbauer](https://github.com/rileyjbauer))
+- move default gcp op to dsl/ [\#376](https://github.com/kubeflow/pipelines/pull/376) ([IronPan](https://github.com/IronPan))
+- Add Amy and Lak's Blogs Link to README [\#375](https://github.com/kubeflow/pipelines/pull/375) ([qimingj](https://github.com/qimingj))
+- Tests - Improve Minikube source code copy process [\#367](https://github.com/kubeflow/pipelines/pull/367) ([Ark-kun](https://github.com/Ark-kun))
+- restructure local components directory [\#357](https://github.com/kubeflow/pipelines/pull/357) ([gaoning777](https://github.com/gaoning777))
+- Switching test to kubeflow deployment [\#351](https://github.com/kubeflow/pipelines/pull/351) ([IronPan](https://github.com/IronPan))
+- added component with tf slim model generator and pipeline adjustments [\#335](https://github.com/kubeflow/pipelines/pull/335) ([dtrawins](https://github.com/dtrawins))
+- PipelineSelector, RecurringRunsManager, and 404Page tests [\#319](https://github.com/kubeflow/pipelines/pull/319) ([yebrahim](https://github.com/yebrahim))
+- Tests - Fixed many test issues \(external PRs, code de-sync\) by getting rid of git clone [\#315](https://github.com/kubeflow/pipelines/pull/315) ([Ark-kun](https://github.com/Ark-kun))
+- Now pipeline function takes direct default values rather than dsp.PipelineParam. [\#110](https://github.com/kubeflow/pipelines/pull/110) ([qimingj](https://github.com/qimingj))
+
+## [0.1.3-rc.2](https://github.com/kubeflow/pipelines/tree/0.1.3-rc.2) (2018-11-22)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.3-rc.1...0.1.3-rc.2)
+
+**Closed issues:**
+
+- Parameterize kaniko pod namespace [\#344](https://github.com/kubeflow/pipelines/issues/344)
+- ks init error : msg="unable to find SHA1 for repo: Get https://api.github.com/repos/ksonnet/parts/commits/master: dial tcp: [\#327](https://github.com/kubeflow/pipelines/issues/327)
+- Pipeline Summary is blocking the graph [\#289](https://github.com/kubeflow/pipelines/issues/289)
+- The frontend-integration-test is really flaky [\#272](https://github.com/kubeflow/pipelines/issues/272)
+
+**Merged pull requests:**
+
+- Tests - Stop repeating the logs three times [\#366](https://github.com/kubeflow/pipelines/pull/366) ([Ark-kun](https://github.com/Ark-kun))
+- Fixes issue with footer overlapping side panel and fixes summary layout [\#353](https://github.com/kubeflow/pipelines/pull/353) ([rileyjbauer](https://github.com/rileyjbauer))
+- PipelineDetails cleanup [\#350](https://github.com/kubeflow/pipelines/pull/350) ([yebrahim](https://github.com/yebrahim))
+- Add support for nvidia gpu limit [\#346](https://github.com/kubeflow/pipelines/pull/346) ([hongye-sun](https://github.com/hongye-sun))
+- mount gcp credentials for kaniko worker [\#343](https://github.com/kubeflow/pipelines/pull/343) ([IronPan](https://github.com/IronPan))
+- rename ks registry ml-pipeline -\> pipeline [\#340](https://github.com/kubeflow/pipelines/pull/340) ([IronPan](https://github.com/IronPan))
+- restructure dataflow component structure [\#338](https://github.com/kubeflow/pipelines/pull/338) ([gaoning777](https://github.com/gaoning777))
+- formating jsonnet registry [\#333](https://github.com/kubeflow/pipelines/pull/333) ([IronPan](https://github.com/IronPan))
+- Use upper case for ks parameter [\#328](https://github.com/kubeflow/pipelines/pull/328) ([IronPan](https://github.com/IronPan))
+- Use ks env for pipeline namespace [\#326](https://github.com/kubeflow/pipelines/pull/326) ([IronPan](https://github.com/IronPan))
+- Tests - Fixed the argo submit failure not failing the script [\#324](https://github.com/kubeflow/pipelines/pull/324) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Renamed DockerContainer spec to Container [\#323](https://github.com/kubeflow/pipelines/pull/323) ([Ark-kun](https://github.com/Ark-kun))
+- fix bug for the component build with requirements [\#320](https://github.com/kubeflow/pipelines/pull/320) ([gaoning777](https://github.com/gaoning777))
+- Use latest npm and node on travis, fix tests [\#317](https://github.com/kubeflow/pipelines/pull/317) ([yebrahim](https://github.com/yebrahim))
+- add metric image file for wiki [\#316](https://github.com/kubeflow/pipelines/pull/316) ([hongye-sun](https://github.com/hongye-sun))
+- Add default\_gcp\_op [\#314](https://github.com/kubeflow/pipelines/pull/314) ([IronPan](https://github.com/IronPan))
+- return self for container op initialization methods to allow chaining the construction [\#313](https://github.com/kubeflow/pipelines/pull/313) ([IronPan](https://github.com/IronPan))
+- switch set\(\) to add\(\) for better inheritance. [\#312](https://github.com/kubeflow/pipelines/pull/312) ([IronPan](https://github.com/IronPan))
+- Changes "Hide" button on Pipeline summary to fully hide the paper card [\#305](https://github.com/kubeflow/pipelines/pull/305) ([rileyjbauer](https://github.com/rileyjbauer))
+- add ngao to test owners because of frequent contributions to this directory, including e2e, sample tests and imagebuilder. [\#299](https://github.com/kubeflow/pipelines/pull/299) ([gaoning777](https://github.com/gaoning777))
+- reenable basic sample tests, serially after frontend integ tests \(triggering 5 times, successful always\) [\#298](https://github.com/kubeflow/pipelines/pull/298) ([gaoning777](https://github.com/gaoning777))
+- Add run status to page title [\#287](https://github.com/kubeflow/pipelines/pull/287) ([yebrahim](https://github.com/yebrahim))
+- Update button styling for new experiment and new run [\#264](https://github.com/kubeflow/pipelines/pull/264) ([rileyjbauer](https://github.com/rileyjbauer))
+- refactor component build codes [\#260](https://github.com/kubeflow/pipelines/pull/260) ([gaoning777](https://github.com/gaoning777))
+- Remove tfjob operator from sdk [\#233](https://github.com/kubeflow/pipelines/pull/233) ([IronPan](https://github.com/IronPan))
+- SDK/Components/PythonContainerOp - Fixed string escaping warning [\#208](https://github.com/kubeflow/pipelines/pull/208) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/DSL/Compiler - Improved compilation of dsl.Conditional - UX support done [\#177](https://github.com/kubeflow/pipelines/pull/177) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.3-rc.1](https://github.com/kubeflow/pipelines/tree/0.1.3-rc.1) (2018-11-17)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.2...0.1.3-rc.1)
+
+**Closed issues:**
+
+- Cannot copy pipeline source on Pipeline details page [\#301](https://github.com/kubeflow/pipelines/issues/301)
+- Notebook Kaniko job shows logs in red background [\#288](https://github.com/kubeflow/pipelines/issues/288)
+- a pipeline must be selected [\#286](https://github.com/kubeflow/pipelines/issues/286)
+- Create experiment from Notebook and use link to open KFP UI. Back button doesn't work. [\#281](https://github.com/kubeflow/pipelines/issues/281)
+- Text/link displayed after calling run\_pipeline should be "Run link" not "Job link" [\#266](https://github.com/kubeflow/pipelines/issues/266)
+- Perf issue - All runs page loads very slowly with \>100 runs [\#259](https://github.com/kubeflow/pipelines/issues/259)
+- UI - feature ask: Remember the "Rows per page:" setting [\#258](https://github.com/kubeflow/pipelines/issues/258)
+- Upload Error Invalid response: 404 [\#239](https://github.com/kubeflow/pipelines/issues/239)
+- Auth Error when running TFX notebook. [\#229](https://github.com/kubeflow/pipelines/issues/229)
+- Cannot create a Notebook when starting KFP from cloud shell [\#179](https://github.com/kubeflow/pipelines/issues/179)
+- Unsupported Scan Error While Listing the Jobs of an Experiment [\#171](https://github.com/kubeflow/pipelines/issues/171)
+- Our test code and test images code is not always the same. [\#163](https://github.com/kubeflow/pipelines/issues/163)
+- Popped out TFMA is too small [\#160](https://github.com/kubeflow/pipelines/issues/160)
+- SDK should require kubernetes client lib [\#158](https://github.com/kubeflow/pipelines/issues/158)
+- Experiment list title should not change [\#71](https://github.com/kubeflow/pipelines/issues/71)
+- Create a sample notebook [\#69](https://github.com/kubeflow/pipelines/issues/69)
+- Frontend should give 404s for non-existing pages/routes. [\#55](https://github.com/kubeflow/pipelines/issues/55)
+- Remove python op decorator [\#44](https://github.com/kubeflow/pipelines/issues/44)
+
+**Merged pull requests:**
+
+- configurable namespaces in sample test and python SDK [\#306](https://github.com/kubeflow/pipelines/pull/306) ([gaoning777](https://github.com/gaoning777))
+- Allows copying of pipeline source from pipeline details page [\#302](https://github.com/kubeflow/pipelines/pull/302) ([rileyjbauer](https://github.com/rileyjbauer))
+- Add volume, volumemount and env to container op [\#300](https://github.com/kubeflow/pipelines/pull/300) ([IronPan](https://github.com/IronPan))
+- Update frontend handling of graphs [\#293](https://github.com/kubeflow/pipelines/pull/293) ([rileyjbauer](https://github.com/rileyjbauer))
+- configure logger such that it shows correct colors for the logs [\#292](https://github.com/kubeflow/pipelines/pull/292) ([gaoning777](https://github.com/gaoning777))
+- Update default pipeline version in ks package [\#291](https://github.com/kubeflow/pipelines/pull/291) ([IronPan](https://github.com/IronPan))
+- Update frontend tensorboard spec [\#283](https://github.com/kubeflow/pipelines/pull/283) ([IronPan](https://github.com/IronPan))
+- Revert sample test to mitigate test flakiness [\#277](https://github.com/kubeflow/pipelines/pull/277) ([IronPan](https://github.com/IronPan))
+- Fix build badge [\#276](https://github.com/kubeflow/pipelines/pull/276) ([yebrahim](https://github.com/yebrahim))
+- attach service account to tensorboard pod [\#273](https://github.com/kubeflow/pipelines/pull/273) ([IronPan](https://github.com/IronPan))
+- Upgrade @kubernetes/client-node [\#271](https://github.com/kubeflow/pipelines/pull/271) ([yebrahim](https://github.com/yebrahim))
+- Job link -\> run link [\#268](https://github.com/kubeflow/pipelines/pull/268) ([yebrahim](https://github.com/yebrahim))
+- Fix build and coveralls badges [\#267](https://github.com/kubeflow/pipelines/pull/267) ([yebrahim](https://github.com/yebrahim))
+- create secret for default service account [\#262](https://github.com/kubeflow/pipelines/pull/262) ([IronPan](https://github.com/IronPan))
+- Add gcp secret parameter to container op [\#261](https://github.com/kubeflow/pipelines/pull/261) ([IronPan](https://github.com/IronPan))
+- Adds NewRun tests [\#242](https://github.com/kubeflow/pipelines/pull/242) ([rileyjbauer](https://github.com/rileyjbauer))
+- Move basic sample tests to e2e tests [\#235](https://github.com/kubeflow/pipelines/pull/235) ([gaoning777](https://github.com/gaoning777))
+- switch default tag to strongly typed version number [\#234](https://github.com/kubeflow/pipelines/pull/234) ([IronPan](https://github.com/IronPan))
+- Components - Reorganized components/kubeflow [\#232](https://github.com/kubeflow/pipelines/pull/232) ([Ark-kun](https://github.com/Ark-kun))
+- Tests - Simplified test/sample-test/run\_tests.sh a bit [\#230](https://github.com/kubeflow/pipelines/pull/230) ([Ark-kun](https://github.com/Ark-kun))
+- enable component build unit test [\#228](https://github.com/kubeflow/pipelines/pull/228) ([gaoning777](https://github.com/gaoning777))
+- add support for dependencies in the component image building [\#219](https://github.com/kubeflow/pipelines/pull/219) ([gaoning777](https://github.com/gaoning777))
+- minor fixes [\#217](https://github.com/kubeflow/pipelines/pull/217) ([gaoning777](https://github.com/gaoning777))
+- SDK/Components - Fixes and more tests [\#213](https://github.com/kubeflow/pipelines/pull/213) ([Ark-kun](https://github.com/Ark-kun))
+- Use kubeflow as default namespace for tf serving [\#211](https://github.com/kubeflow/pipelines/pull/211) ([IronPan](https://github.com/IronPan))
+- Recurring run details tests [\#202](https://github.com/kubeflow/pipelines/pull/202) ([yebrahim](https://github.com/yebrahim))
+- Update check experiment condition [\#201](https://github.com/kubeflow/pipelines/pull/201) ([IronPan](https://github.com/IronPan))
+- Tests/Minikube - Fix Google credentials [\#194](https://github.com/kubeflow/pipelines/pull/194) ([Ark-kun](https://github.com/Ark-kun))
+- Tests/Minikube - Downgraded Docker to 18.06.1 [\#189](https://github.com/kubeflow/pipelines/pull/189) ([Ark-kun](https://github.com/Ark-kun))
+- Marked scripts as executable [\#188](https://github.com/kubeflow/pipelines/pull/188) ([Ark-kun](https://github.com/Ark-kun))
+- Update experimental/OWNERS [\#185](https://github.com/kubeflow/pipelines/pull/185) ([Ark-kun](https://github.com/Ark-kun))
+- Tests - Upgrade Minikube and Kubernetes for new Docker release [\#183](https://github.com/kubeflow/pipelines/pull/183) ([Ark-kun](https://github.com/Ark-kun))
+- Modify sample notebook. [\#181](https://github.com/kubeflow/pipelines/pull/181) ([qimingj](https://github.com/qimingj))
+- openvino predict component and pipeline example [\#180](https://github.com/kubeflow/pipelines/pull/180) ([dtrawins](https://github.com/dtrawins))
+- OpenVINO model optimizer component and example pipeline [\#178](https://github.com/kubeflow/pipelines/pull/178) ([dtrawins](https://github.com/dtrawins))
+- Add coveralls for frontend code coverage [\#176](https://github.com/kubeflow/pipelines/pull/176) ([yebrahim](https://github.com/yebrahim))
+- SDK/Components - Switched the generated inputs/outputs structure from dict to list [\#173](https://github.com/kubeflow/pipelines/pull/173) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/DSL-compiler - Compile without temporary files [\#172](https://github.com/kubeflow/pipelines/pull/172) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components - Removed the old argument syntax [\#168](https://github.com/kubeflow/pipelines/pull/168) ([Ark-kun](https://github.com/Ark-kun))
+- Sets min height/width of all fullscreen viewers to 80% [\#167](https://github.com/kubeflow/pipelines/pull/167) ([rileyjbauer](https://github.com/rileyjbauer))
+- Tests - Testing master+branch instead of just branch [\#165](https://github.com/kubeflow/pipelines/pull/165) ([Ark-kun](https://github.com/Ark-kun))
+- Adding support for experiments in the CLI. [\#159](https://github.com/kubeflow/pipelines/pull/159) ([vicaire](https://github.com/vicaire))
+- Acknowledge Argo. [\#157](https://github.com/kubeflow/pipelines/pull/157) ([jlewi](https://github.com/jlewi))
+- SDK - Relative imports [\#156](https://github.com/kubeflow/pipelines/pull/156) ([Ark-kun](https://github.com/Ark-kun))
+- Update a sample notebook. [\#155](https://github.com/kubeflow/pipelines/pull/155) ([qimingj](https://github.com/qimingj))
+- activate public prow service account [\#153](https://github.com/kubeflow/pipelines/pull/153) ([IronPan](https://github.com/IronPan))
+- sample tests using the dataset in the corresponding test project [\#152](https://github.com/kubeflow/pipelines/pull/152) ([gaoning777](https://github.com/gaoning777))
+- SDK/Tests - Removed sys.path manipulations. [\#151](https://github.com/kubeflow/pipelines/pull/151) ([Ark-kun](https://github.com/Ark-kun))
+- Removes the magic in Input, reducing it to a styled TextField [\#150](https://github.com/kubeflow/pipelines/pull/150) ([rileyjbauer](https://github.com/rileyjbauer))
+- Remove token from travis badge [\#148](https://github.com/kubeflow/pipelines/pull/148) ([rileyjbauer](https://github.com/rileyjbauer))
+- Prevents '0' from showing up on RunDetails config tab when pipeline has no parameters [\#147](https://github.com/kubeflow/pipelines/pull/147) ([rileyjbauer](https://github.com/rileyjbauer))
+- sample test fix: software version error [\#146](https://github.com/kubeflow/pipelines/pull/146) ([gaoning777](https://github.com/gaoning777))
+- add todos for the image tag update for new releases [\#138](https://github.com/kubeflow/pipelines/pull/138) ([gaoning777](https://github.com/gaoning777))
+- Require full function signatures [\#136](https://github.com/kubeflow/pipelines/pull/136) ([yebrahim](https://github.com/yebrahim))
+- 404 page [\#135](https://github.com/kubeflow/pipelines/pull/135) ([yebrahim](https://github.com/yebrahim))
+- Updated the kubeflow-tf sample README [\#130](https://github.com/kubeflow/pipelines/pull/130) ([sarahmaddox](https://github.com/sarahmaddox))
+- Refactor RunList, add test suite [\#127](https://github.com/kubeflow/pipelines/pull/127) ([yebrahim](https://github.com/yebrahim))
+- Consolidate UI of toolbars for AllRunsList and ExperimentList [\#124](https://github.com/kubeflow/pipelines/pull/124) ([rileyjbauer](https://github.com/rileyjbauer))
+- Adjusts the size and position of the pipelines icon [\#121](https://github.com/kubeflow/pipelines/pull/121) ([rileyjbauer](https://github.com/rileyjbauer))
+- Add integration tests for API servers [\#112](https://github.com/kubeflow/pipelines/pull/112) ([IronPan](https://github.com/IronPan))
+- Presubmit tests - Added presubmit SDK Components tests to Travis [\#87](https://github.com/kubeflow/pipelines/pull/87) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components/Python - Removed python\_op in favor of python\_component [\#85](https://github.com/kubeflow/pipelines/pull/85) ([Ark-kun](https://github.com/Ark-kun))
+
+## [0.1.2](https://github.com/kubeflow/pipelines/tree/0.1.2) (2018-11-08)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.1...0.1.2)
+
+**Merged pull requests:**
+
+- Fixes bug where a new recurring run has no default trigger [\#144](https://github.com/kubeflow/pipelines/pull/144) ([rileyjbauer](https://github.com/rileyjbauer))
+- add tensorboard routing rule [\#143](https://github.com/kubeflow/pipelines/pull/143) ([IronPan](https://github.com/IronPan))
+- Update Screenshots Used in Wiki [\#142](https://github.com/kubeflow/pipelines/pull/142) ([qimingj](https://github.com/qimingj))
+- Update term "pipeline with oss tfx components" [\#141](https://github.com/kubeflow/pipelines/pull/141) ([gaoning777](https://github.com/gaoning777))
+- Tutorials/Lightweight Python components [\#139](https://github.com/kubeflow/pipelines/pull/139) ([Ark-kun](https://github.com/Ark-kun))
+- Add a sample notebook. [\#137](https://github.com/kubeflow/pipelines/pull/137) ([qimingj](https://github.com/qimingj))
+- Fix flaky basic sample test [\#134](https://github.com/kubeflow/pipelines/pull/134) ([gaoning777](https://github.com/gaoning777))
+- Make kubernetes a dependency of kfp package. [\#133](https://github.com/kubeflow/pipelines/pull/133) ([qimingj](https://github.com/qimingj))
+- Updated the xgboost-spark sample README [\#132](https://github.com/kubeflow/pipelines/pull/132) ([sarahmaddox](https://github.com/sarahmaddox))
+- Updated the tfx sample README [\#131](https://github.com/kubeflow/pipelines/pull/131) ([sarahmaddox](https://github.com/sarahmaddox))
+- Updated the basic samples README [\#129](https://github.com/kubeflow/pipelines/pull/129) ([sarahmaddox](https://github.com/sarahmaddox))
+- Updated the components README [\#128](https://github.com/kubeflow/pipelines/pull/128) ([sarahmaddox](https://github.com/sarahmaddox))
+- Adding integrations test for the CLI commands related to pipelines. [\#125](https://github.com/kubeflow/pipelines/pull/125) ([vicaire](https://github.com/vicaire))
+- Expanded row changes [\#120](https://github.com/kubeflow/pipelines/pull/120) ([ajayalfred](https://github.com/ajayalfred))
+- Fix an issue that %%docker doesn't work. [\#119](https://github.com/kubeflow/pipelines/pull/119) ([qimingj](https://github.com/qimingj))
+- Updated favicon to monochrome color [\#118](https://github.com/kubeflow/pipelines/pull/118) ([ajayalfred](https://github.com/ajayalfred))
+- Removed mentions of ark7 in tests [\#111](https://github.com/kubeflow/pipelines/pull/111) ([Ark-kun](https://github.com/Ark-kun))
+- Moves docs from the /samples README to the wiki [\#84](https://github.com/kubeflow/pipelines/pull/84) ([sarahmaddox](https://github.com/sarahmaddox))
+- Add basic sample tests [\#79](https://github.com/kubeflow/pipelines/pull/79) ([gaoning777](https://github.com/gaoning777))
+- remove kubeflow/ [\#68](https://github.com/kubeflow/pipelines/pull/68) ([IronPan](https://github.com/IronPan))
+
+## [0.1.1](https://github.com/kubeflow/pipelines/tree/0.1.1) (2018-11-07)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.1.0...0.1.1)
+
+**Merged pull requests:**
+
+- fix cloud build typo [\#116](https://github.com/kubeflow/pipelines/pull/116) ([gaoning777](https://github.com/gaoning777))
+- image tag update for release [\#114](https://github.com/kubeflow/pipelines/pull/114) ([gaoning777](https://github.com/gaoning777))
+- Remove CMLE sample for now since we are waiting for a service fix to support TPU. [\#113](https://github.com/kubeflow/pipelines/pull/113) ([qimingj](https://github.com/qimingj))
+- Add tests for the NewExperiment page [\#109](https://github.com/kubeflow/pipelines/pull/109) ([rileyjbauer](https://github.com/rileyjbauer))
+- Account for padding in metric progress fill [\#107](https://github.com/kubeflow/pipelines/pull/107) ([yebrahim](https://github.com/yebrahim))
+- First integration test for the ML Pipeline CLI \(Pipeline List\). [\#81](https://github.com/kubeflow/pipelines/pull/81) ([vicaire](https://github.com/vicaire))
+- add xgboost: migrate from the old repo [\#46](https://github.com/kubeflow/pipelines/pull/46) ([gaoning777](https://github.com/gaoning777))
+
+## [0.1.0](https://github.com/kubeflow/pipelines/tree/0.1.0) (2018-11-06)
+[Full Changelog](https://github.com/kubeflow/pipelines/compare/0.0.42...0.1.0)
+
+**Closed issues:**
+
+- The "Recurrent run configs" tab is showing inaccurate \# of jobs [\#100](https://github.com/kubeflow/pipelines/issues/100)
+- Duplicate experiment name would return me a blank page [\#97](https://github.com/kubeflow/pipelines/issues/97)
+- The Experiments/All runs tab should probably be the default tab in Experiments. [\#58](https://github.com/kubeflow/pipelines/issues/58)
+- The Experiments tab should be the default tab. [\#57](https://github.com/kubeflow/pipelines/issues/57)
+
+**Merged pull requests:**
+
+- Use the experiment's resource reference in the listJobs request [\#105](https://github.com/kubeflow/pipelines/pull/105) ([yebrahim](https://github.com/yebrahim))
+- Fix validation check for maximum size limit [\#104](https://github.com/kubeflow/pipelines/pull/104) ([IronPan](https://github.com/IronPan))
+- Add Ning and Alexey to OWNERS for components, samples and sample-test [\#102](https://github.com/kubeflow/pipelines/pull/102) ([Ark-kun](https://github.com/Ark-kun))
+- Don't barf when experiment name is already used [\#101](https://github.com/kubeflow/pipelines/pull/101) ([yebrahim](https://github.com/yebrahim))
+- CSS changes for nav menu and tables [\#99](https://github.com/kubeflow/pipelines/pull/99) ([ajayalfred](https://github.com/ajayalfred))
+- Fixed the Minikube tests after moving to the new repo [\#98](https://github.com/kubeflow/pipelines/pull/98) ([Ark-kun](https://github.com/Ark-kun))
+- sort by run display name by default [\#96](https://github.com/kubeflow/pipelines/pull/96) ([IronPan](https://github.com/IronPan))
+- SDK/DSL/Compiler - Reverted fix of dsl.Condition until the UI is ready. [\#94](https://github.com/kubeflow/pipelines/pull/94) ([Ark-kun](https://github.com/Ark-kun))
+- debug tfma failure [\#91](https://github.com/kubeflow/pipelines/pull/91) ([gaoning777](https://github.com/gaoning777))
+- fix miscellaneous List API issue [\#90](https://github.com/kubeflow/pipelines/pull/90) ([IronPan](https://github.com/IronPan))
+- mlp -\> kfp.dsl [\#88](https://github.com/kubeflow/pipelines/pull/88) ([Ark-kun](https://github.com/Ark-kun))
+- ExperimentList tests, use immer.js [\#86](https://github.com/kubeflow/pipelines/pull/86) ([yebrahim](https://github.com/yebrahim))
+- Moves docs from pipelines main README to wiki [\#83](https://github.com/kubeflow/pipelines/pull/83) ([sarahmaddox](https://github.com/sarahmaddox))
+- Add rileyjbauer to frontend OWNERS [\#82](https://github.com/kubeflow/pipelines/pull/82) ([yebrahim](https://github.com/yebrahim))
+- Update OWNERS to add qimingj [\#77](https://github.com/kubeflow/pipelines/pull/77) ([qimingj](https://github.com/qimingj))
+- Compile samples instead of hard code them in API server [\#76](https://github.com/kubeflow/pipelines/pull/76) ([IronPan](https://github.com/IronPan))
+- Added @gaoning777 and @Ark-kun to OWNERS [\#74](https://github.com/kubeflow/pipelines/pull/74) ([Ark-kun](https://github.com/Ark-kun))
+- Change title from 'Start a new run' to 'Start recurring run' as appropriate [\#73](https://github.com/kubeflow/pipelines/pull/73) ([rileyjbauer](https://github.com/rileyjbauer))
+- Add %%docker magic to jupyter kernel. [\#72](https://github.com/kubeflow/pipelines/pull/72) ([qimingj](https://github.com/qimingj))
+- Build Python SDK in the releasing [\#70](https://github.com/kubeflow/pipelines/pull/70) ([gaoning777](https://github.com/gaoning777))
+- Fix tfx name bug in the tfma sample test [\#67](https://github.com/kubeflow/pipelines/pull/67) ([gaoning777](https://github.com/gaoning777))
+- Fixes off-by-one error with months in Trigger [\#66](https://github.com/kubeflow/pipelines/pull/66) ([rileyjbauer](https://github.com/rileyjbauer))
+- Updating OWNER files. Adding per-subdirectory OWNER files. [\#65](https://github.com/kubeflow/pipelines/pull/65) ([vicaire](https://github.com/vicaire))
+- This is a test. [\#51](https://github.com/kubeflow/pipelines/pull/51) ([vicaire](https://github.com/vicaire))
+- PipelineList tests, fix clearing error banner [\#50](https://github.com/kubeflow/pipelines/pull/50) ([yebrahim](https://github.com/yebrahim))
+- Adds the Kubeflow logo to the side nav. Fetches specific font weights [\#48](https://github.com/kubeflow/pipelines/pull/48) ([rileyjbauer](https://github.com/rileyjbauer))
+- Refresh button now reloads logs and artifacts as well on RunDetails page [\#47](https://github.com/kubeflow/pipelines/pull/47) ([rileyjbauer](https://github.com/rileyjbauer))
+- Cleans up StaticGraphParser tests as per PR commends from \#30 [\#45](https://github.com/kubeflow/pipelines/pull/45) ([rileyjbauer](https://github.com/rileyjbauer))
+- Fix resnet-cmle sample. [\#43](https://github.com/kubeflow/pipelines/pull/43) ([qimingj](https://github.com/qimingj))
+- Add ngao to the owners [\#42](https://github.com/kubeflow/pipelines/pull/42) ([gaoning777](https://github.com/gaoning777))
+- SDK/Tests/Components - Corrected test argument types [\#41](https://github.com/kubeflow/pipelines/pull/41) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Components/Python - Stopped using the Fire library [\#40](https://github.com/kubeflow/pipelines/pull/40) ([Ark-kun](https://github.com/Ark-kun))
+- Upgrade kubeflow to v0.3.2 [\#39](https://github.com/kubeflow/pipelines/pull/39) ([IronPan](https://github.com/IronPan))
+- Components - Removed debug print [\#38](https://github.com/kubeflow/pipelines/pull/38) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/Tests/Components - Improve temporary file handling [\#37](https://github.com/kubeflow/pipelines/pull/37) ([Ark-kun](https://github.com/Ark-kun))
+- Add Alexey Volkov to OWNERS [\#36](https://github.com/kubeflow/pipelines/pull/36) ([Ark-kun](https://github.com/Ark-kun))
+- Propagate name for runs from scheduled job [\#33](https://github.com/kubeflow/pipelines/pull/33) ([IronPan](https://github.com/IronPan))
+- add rileyjbauer to OWNERS [\#32](https://github.com/kubeflow/pipelines/pull/32) ([rileyjbauer](https://github.com/rileyjbauer))
+- Push pagination control to CustomTable [\#31](https://github.com/kubeflow/pipelines/pull/31) ([yebrahim](https://github.com/yebrahim))
+- Adds many more static workflow parser tests [\#30](https://github.com/kubeflow/pipelines/pull/30) ([rileyjbauer](https://github.com/rileyjbauer))
+- SDK/Components - Switching to map-based syntax for the arguments. [\#29](https://github.com/kubeflow/pipelines/pull/29) ([Ark-kun](https://github.com/Ark-kun))
+- SDK/DSL/Compiler - Fixed compilation of dsl.Condition [\#28](https://github.com/kubeflow/pipelines/pull/28) ([Ark-kun](https://github.com/Ark-kun))
+- TFMA deployer bug fix [\#27](https://github.com/kubeflow/pipelines/pull/27) ([gaoning777](https://github.com/gaoning777))
+- Upgrading the container versions to 0.0.42, the version of the first release of kubeflow/pipelines. [\#26](https://github.com/kubeflow/pipelines/pull/26) ([vicaire](https://github.com/vicaire))
+
+## [0.0.42](https://github.com/kubeflow/pipelines/tree/0.0.42) (2018-11-02)
+
+**Closed issues:**
+
+- ScheduledWorkflow CRD: CLI [\#7](https://github.com/kubeflow/pipelines/issues/7)
+- How does it proceed with this project? [\#1](https://github.com/kubeflow/pipelines/issues/1)
+
+**Merged pull requests:**
+
+- Updating references to the project repository to kubeflow/pipelines. [\#25](https://github.com/kubeflow/pipelines/pull/25) ([vicaire](https://github.com/vicaire))
+- Fixing the GO import paths to reference the kubeflow/pipelines repository [\#24](https://github.com/kubeflow/pipelines/pull/24) ([vicaire](https://github.com/vicaire))
+- Initial commit of the kubeflow/pipeline project. [\#22](https://github.com/kubeflow/pipelines/pull/22) ([kubeflow-pipeline-bot](https://github.com/kubeflow-pipeline-bot))
+- Cleaning up the kubeflow/pipeline repository. [\#21](https://github.com/kubeflow/pipelines/pull/21) ([kubeflow-pipeline-bot](https://github.com/kubeflow-pipeline-bot))
+- TEST [\#20](https://github.com/kubeflow/pipelines/pull/20) ([kubeflow-pipeline-bot](https://github.com/kubeflow-pipeline-bot))
+- TEST [\#19](https://github.com/kubeflow/pipelines/pull/19) ([kubeflow-pipeline-bot](https://github.com/kubeflow-pipeline-bot))
+- Updating the README [\#18](https://github.com/kubeflow/pipelines/pull/18) ([kubeflow-pipeline-bot](https://github.com/kubeflow-pipeline-bot))
+- Test [\#17](https://github.com/kubeflow/pipelines/pull/17) ([kubeflow-pipeline-bot](https://github.com/kubeflow-pipeline-bot))
+- Improve Dockerfile [\#14](https://github.com/kubeflow/pipelines/pull/14) ([ynqa](https://github.com/ynqa))
+- Fixing bug in parameterized sample. [\#13](https://github.com/kubeflow/pipelines/pull/13) ([vicaire](https://github.com/vicaire))
+- Adding a Dockerfile to create the scheduled workflow container. [\#11](https://github.com/kubeflow/pipelines/pull/11) ([vicaire](https://github.com/vicaire))
+- deplize [\#10](https://github.com/kubeflow/pipelines/pull/10) ([ynqa](https://github.com/ynqa))
+- Fix typo in README.md [\#9](https://github.com/kubeflow/pipelines/pull/9) ([vicaire](https://github.com/vicaire))
+- Adding more samples for the ScheduledWorkflow CRD. [\#8](https://github.com/kubeflow/pipelines/pull/8) ([vicaire](https://github.com/vicaire))
+- Add manifests for CRD, examples [\#6](https://github.com/kubeflow/pipelines/pull/6) ([ynqa](https://github.com/ynqa))
+- Setting up the README, prow\_config and OWNERS file. [\#4](https://github.com/kubeflow/pipelines/pull/4) ([vicaire](https://github.com/vicaire))
+- CRD for scheduling Argo workflows \(Implementation\) [\#3](https://github.com/kubeflow/pipelines/pull/3) ([vicaire](https://github.com/vicaire))
+- CRD for scheduling Argo workflows \(API spec\) [\#2](https://github.com/kubeflow/pipelines/pull/2) ([vicaire](https://github.com/vicaire))
-\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
+\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)*
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5bff97828bd..01de7730903 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -19,7 +19,7 @@ again.
The Python part of the project will follow [Google Python style guide](http://google.github.io/styleguide/pyguide.html). We provide a [yapf](https://github.com/google/yapf) configuration file to help contributors auto-format their code to adopt the Google Python style. Also, it is encouraged to lint python docstrings by [docformatter](https://github.com/myint/docformatter).
-The frontend part of the project uses [prettier](https://prettier.io/) for formatting, read [frontend/README.md#code-style](frontend/#code-style) for more details.
+The frontend part of the project uses [prettier](https://prettier.io/) for formatting, read [frontend/README.md#code-style](frontend/README.md#code-style) for more details.
## Code reviews
diff --git a/backend/Dockerfile b/backend/Dockerfile
index f1e7a4937d9..754e7b9d0ac 100644
--- a/backend/Dockerfile
+++ b/backend/Dockerfile
@@ -25,6 +25,10 @@ RUN if [ "$use_remote_build" = "true" ]; then \
# Compile
FROM python:3.5 as compiler
+RUN apt-get update -y && \
+ apt-get install --no-install-recommends -y -q default-jdk python3-setuptools python3-dev
+RUN wget https://bootstrap.pypa.io/get-pip.py && python3 get-pip.py
+RUN python3 -m pip install apache-beam[gcp]==2.17 pyarrow==0.14.1 tfx==0.15.0
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY sdk sdk
@@ -41,7 +45,7 @@ COPY ./samples .
#I think it's better to just use a shell loop though.
#RUN for pipeline in $(find . -maxdepth 2 -name '*.py' -type f); do dsl-compile --py "$pipeline" --output "$pipeline.tar.gz"; done
#The "for" loop breaks on all whitespace, so we either need to override IFS or use the "read" command instead.
-RUN set -e; find core -maxdepth 2 -name '*.py' -type f | while read pipeline; do dsl-compile --py "$pipeline" --output "$pipeline.tar.gz"; done
+RUN set -e; find core -maxdepth 2 -name '*.py' -type f | while read pipeline; do python3 $pipeline; done
FROM debian:stretch
@@ -61,6 +65,9 @@ COPY --from=compiler /samples/ /samples/
# Adding CA certificate so API server can download pipeline through URL
RUN apt-get update && apt-get install -y ca-certificates
+# Pin sample doc links to the commit that built the backend image
+RUN sed "s#/blob/master/#/blob/${COMMIT_SHA}/#g" -i /config/sample_config.json
+
# Expose apiserver port
EXPOSE 8888
diff --git a/backend/api/generate_api.sh b/backend/api/generate_api.sh
index 6b57e843757..20f159ee702 100755
--- a/backend/api/generate_api.sh
+++ b/backend/api/generate_api.sh
@@ -57,7 +57,7 @@ jq -s '
reduce .[] as $item ({}; . * $item) |
.info.title = "Kubeflow Pipelines API" |
.info.description = "This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition." |
- .info.version = "0.1.20"
+ .info.version = "0.1.38"
' ${DIR}/swagger/{run,job,pipeline,experiment,pipeline.upload}.swagger.json > "${DIR}/swagger/kfp_api_single_file.swagger.json"
# Generate Go HTTP client from the swagger files.
diff --git a/backend/api/go_client/resource_reference.pb.go b/backend/api/go_client/resource_reference.pb.go
index 2c3b693a600..0a834c74823 100755
--- a/backend/api/go_client/resource_reference.pb.go
+++ b/backend/api/go_client/resource_reference.pb.go
@@ -40,6 +40,7 @@ const (
ResourceType_JOB ResourceType = 2
ResourceType_PIPELINE ResourceType = 3
ResourceType_PIPELINE_VERSION ResourceType = 4
+ ResourceType_NAMESPACE ResourceType = 5
)
var ResourceType_name = map[int32]string{
@@ -48,6 +49,7 @@ var ResourceType_name = map[int32]string{
2: "JOB",
3: "PIPELINE",
4: "PIPELINE_VERSION",
+ 5: "NAMESPACE",
}
var ResourceType_value = map[string]int32{
"UNKNOWN_RESOURCE_TYPE": 0,
@@ -55,13 +57,14 @@ var ResourceType_value = map[string]int32{
"JOB": 2,
"PIPELINE": 3,
"PIPELINE_VERSION": 4,
+ "NAMESPACE": 5,
}
func (x ResourceType) String() string {
return proto.EnumName(ResourceType_name, int32(x))
}
func (ResourceType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_resource_reference_81a9849386131b93, []int{0}
+ return fileDescriptor_resource_reference_876ea904b7b7aed8, []int{0}
}
type Relationship int32
@@ -87,7 +90,7 @@ func (x Relationship) String() string {
return proto.EnumName(Relationship_name, int32(x))
}
func (Relationship) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_resource_reference_81a9849386131b93, []int{1}
+ return fileDescriptor_resource_reference_876ea904b7b7aed8, []int{1}
}
type ResourceKey struct {
@@ -102,7 +105,7 @@ func (m *ResourceKey) Reset() { *m = ResourceKey{} }
func (m *ResourceKey) String() string { return proto.CompactTextString(m) }
func (*ResourceKey) ProtoMessage() {}
func (*ResourceKey) Descriptor() ([]byte, []int) {
- return fileDescriptor_resource_reference_81a9849386131b93, []int{0}
+ return fileDescriptor_resource_reference_876ea904b7b7aed8, []int{0}
}
func (m *ResourceKey) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResourceKey.Unmarshal(m, b)
@@ -149,7 +152,7 @@ func (m *ResourceReference) Reset() { *m = ResourceReference{} }
func (m *ResourceReference) String() string { return proto.CompactTextString(m) }
func (*ResourceReference) ProtoMessage() {}
func (*ResourceReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_resource_reference_81a9849386131b93, []int{1}
+ return fileDescriptor_resource_reference_876ea904b7b7aed8, []int{1}
}
func (m *ResourceReference) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResourceReference.Unmarshal(m, b)
@@ -198,31 +201,32 @@ func init() {
}
func init() {
- proto.RegisterFile("backend/api/resource_reference.proto", fileDescriptor_resource_reference_81a9849386131b93)
-}
-
-var fileDescriptor_resource_reference_81a9849386131b93 = []byte{
- // 351 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x41, 0xab, 0xda, 0x40,
- 0x14, 0x85, 0x5f, 0x12, 0xdb, 0xd7, 0x77, 0x15, 0x99, 0x37, 0x58, 0x48, 0x77, 0x22, 0x2d, 0x88,
- 0x8b, 0x04, 0x14, 0xf7, 0x55, 0x3b, 0xd0, 0x54, 0x3b, 0x09, 0x63, 0xac, 0x6d, 0x37, 0x21, 0x89,
- 0x57, 0x1d, 0x8c, 0xc9, 0x10, 0x23, 0x25, 0xdb, 0xfe, 0xf2, 0x62, 0x68, 0x88, 0xee, 0x66, 0xf8,
- 0x0e, 0xe7, 0xbb, 0x70, 0xe0, 0x63, 0x14, 0xc6, 0x27, 0x4c, 0x77, 0x76, 0xa8, 0xa4, 0x9d, 0xe3,
- 0x25, 0xbb, 0xe6, 0x31, 0x06, 0x39, 0xee, 0x31, 0xc7, 0x34, 0x46, 0x4b, 0xe5, 0x59, 0x91, 0x51,
- 0x23, 0x54, 0x72, 0xf0, 0x05, 0xda, 0xe2, 0x7f, 0x60, 0x89, 0x25, 0xfd, 0x04, 0xad, 0xa2, 0x54,
- 0x68, 0x6a, 0x7d, 0x6d, 0xd8, 0x1d, 0xbf, 0x5a, 0xa1, 0x92, 0x56, 0xcd, 0xfd, 0x52, 0xa1, 0xa8,
- 0x30, 0xed, 0x82, 0x2e, 0x77, 0xa6, 0xde, 0xd7, 0x86, 0x2f, 0x42, 0x97, 0xbb, 0xc1, 0x5f, 0x0d,
- 0x5e, 0xeb, 0x98, 0xa8, 0x35, 0x74, 0x00, 0xc6, 0x09, 0xcb, 0xaa, 0xab, 0x3d, 0x26, 0x0f, 0x5d,
- 0x4b, 0x2c, 0xc5, 0x0d, 0x52, 0x0a, 0xad, 0x34, 0x3c, 0xa3, 0x69, 0x54, 0x5d, 0xd5, 0x9b, 0x4e,
- 0xa1, 0x93, 0x63, 0x12, 0x16, 0x32, 0x4b, 0x2f, 0x47, 0xa9, 0x2a, 0x4f, 0x73, 0x4c, 0x03, 0xc4,
- 0x43, 0x6c, 0xb4, 0x87, 0xce, 0xfd, 0xa9, 0xf4, 0x03, 0xbc, 0xdf, 0xf0, 0x25, 0x77, 0xb7, 0x3c,
- 0x10, 0x6c, 0xed, 0x6e, 0xc4, 0x82, 0x05, 0xfe, 0x2f, 0x8f, 0x91, 0x27, 0xda, 0x05, 0x60, 0x3f,
- 0x3d, 0x26, 0x9c, 0xef, 0x8c, 0xfb, 0x44, 0xa3, 0xcf, 0x60, 0x7c, 0x73, 0xe7, 0x44, 0xa7, 0x1d,
- 0x78, 0xe7, 0x39, 0x1e, 0x5b, 0x39, 0x9c, 0x11, 0x83, 0xf6, 0x80, 0xd4, 0xbf, 0xe0, 0x07, 0x13,
- 0x6b, 0xc7, 0xe5, 0xa4, 0x35, 0xfa, 0x7c, 0xf3, 0x34, 0x5e, 0x6a, 0x42, 0xaf, 0xf1, 0xac, 0x66,
- 0xbe, 0xe3, 0xf2, 0xf5, 0x57, 0xc7, 0x23, 0x4f, 0xf4, 0x05, 0xde, 0xb8, 0x5b, 0xce, 0x04, 0xd1,
- 0x68, 0x1b, 0x9e, 0x17, 0x82, 0xcd, 0x7c, 0x57, 0x10, 0x7d, 0x3e, 0xfd, 0x3d, 0x39, 0xc8, 0xe2,
- 0x78, 0x8d, 0xac, 0x38, 0x3b, 0xdb, 0xa7, 0x6b, 0x84, 0xfb, 0x24, 0xfb, 0x63, 0x2b, 0xa9, 0x30,
- 0x91, 0x29, 0x5e, 0xec, 0xfb, 0xfd, 0x0e, 0x59, 0x10, 0x27, 0x12, 0xd3, 0x22, 0x7a, 0x5b, 0xed,
- 0x36, 0xf9, 0x17, 0x00, 0x00, 0xff, 0xff, 0xa7, 0x32, 0x3a, 0x06, 0xdf, 0x01, 0x00, 0x00,
+ proto.RegisterFile("backend/api/resource_reference.proto", fileDescriptor_resource_reference_876ea904b7b7aed8)
+}
+
+var fileDescriptor_resource_reference_876ea904b7b7aed8 = []byte{
+ // 366 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xc1, 0x6b, 0x9c, 0x40,
+ 0x14, 0xc6, 0xa3, 0x6e, 0x9a, 0xee, 0xdb, 0xed, 0x32, 0x79, 0xa4, 0x60, 0x6f, 0x61, 0x69, 0x21,
+ 0xe4, 0xa0, 0x90, 0x90, 0x7b, 0xcd, 0x76, 0xa0, 0x76, 0x93, 0x51, 0x46, 0xd3, 0xb4, 0xbd, 0x88,
+ 0xba, 0x2f, 0xc9, 0xb0, 0x46, 0x07, 0x57, 0x29, 0x5e, 0xfb, 0x97, 0x97, 0x48, 0xc4, 0xec, 0x6d,
+ 0x86, 0xdf, 0xc7, 0xf7, 0xfb, 0xe0, 0xc1, 0xe7, 0x2c, 0xcd, 0xb7, 0x54, 0x6e, 0xdc, 0x54, 0x2b,
+ 0xb7, 0xa6, 0x5d, 0xd5, 0xd6, 0x39, 0x25, 0x35, 0x3d, 0x50, 0x4d, 0x65, 0x4e, 0x8e, 0xae, 0xab,
+ 0xa6, 0x42, 0x2b, 0xd5, 0x6a, 0xf9, 0x0d, 0x66, 0xf2, 0x35, 0xb0, 0xa6, 0x0e, 0xbf, 0xc0, 0xa4,
+ 0xe9, 0x34, 0xd9, 0xc6, 0xa9, 0x71, 0xb6, 0xb8, 0x38, 0x76, 0x52, 0xad, 0x9c, 0x81, 0xc7, 0x9d,
+ 0x26, 0xd9, 0x63, 0x5c, 0x80, 0xa9, 0x36, 0xb6, 0x79, 0x6a, 0x9c, 0x4d, 0xa5, 0xa9, 0x36, 0xcb,
+ 0x7f, 0x06, 0x1c, 0x0f, 0x31, 0x39, 0x68, 0x70, 0x09, 0xd6, 0x96, 0xba, 0xbe, 0x6b, 0x76, 0xc1,
+ 0xf6, 0xba, 0xd6, 0xd4, 0xc9, 0x17, 0x88, 0x08, 0x93, 0x32, 0x7d, 0x26, 0xdb, 0xea, 0xbb, 0xfa,
+ 0x37, 0x5e, 0xc1, 0xbc, 0xa6, 0x22, 0x6d, 0x54, 0x55, 0xee, 0x9e, 0x94, 0xee, 0x3d, 0xe3, 0x98,
+ 0x11, 0xc8, 0xbd, 0xd8, 0x79, 0x0b, 0xf3, 0xb7, 0x53, 0xf1, 0x13, 0x7c, 0xbc, 0x13, 0x6b, 0x11,
+ 0xdc, 0x8b, 0x44, 0xf2, 0x28, 0xb8, 0x93, 0x2b, 0x9e, 0xc4, 0xbf, 0x43, 0xce, 0x0e, 0x70, 0x01,
+ 0xc0, 0x7f, 0x85, 0x5c, 0xfa, 0xb7, 0x5c, 0xc4, 0xcc, 0xc0, 0x23, 0xb0, 0x7e, 0x04, 0xd7, 0xcc,
+ 0xc4, 0x39, 0xbc, 0x0f, 0xfd, 0x90, 0xdf, 0xf8, 0x82, 0x33, 0x0b, 0x4f, 0x80, 0x0d, 0xbf, 0xe4,
+ 0x27, 0x97, 0x91, 0x1f, 0x08, 0x36, 0xc1, 0x0f, 0x30, 0x15, 0xde, 0x2d, 0x8f, 0x42, 0x6f, 0xc5,
+ 0xd9, 0xe1, 0xf9, 0xd7, 0x17, 0xed, 0x38, 0x03, 0x6d, 0x38, 0x19, 0xb5, 0x37, 0x5e, 0xec, 0x07,
+ 0x22, 0xfa, 0xee, 0x87, 0xec, 0x00, 0xa7, 0x70, 0x18, 0xdc, 0x0b, 0x2e, 0x99, 0x81, 0x33, 0x38,
+ 0x5a, 0x49, 0xee, 0xc5, 0x81, 0x64, 0xe6, 0xf5, 0xd5, 0x9f, 0xcb, 0x47, 0xd5, 0x3c, 0xb5, 0x99,
+ 0x93, 0x57, 0xcf, 0xee, 0xb6, 0xcd, 0xe8, 0xa1, 0xa8, 0xfe, 0xba, 0x5a, 0x69, 0x2a, 0x54, 0x49,
+ 0x3b, 0xf7, 0xed, 0x39, 0x1f, 0xab, 0x24, 0x2f, 0x14, 0x95, 0x4d, 0xf6, 0xae, 0x3f, 0xe3, 0xe5,
+ 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x58, 0x92, 0x1b, 0xee, 0x01, 0x00, 0x00,
}
diff --git a/backend/api/go_http_client/job_model/api_job.go-- b/backend/api/go_http_client/job_model/api_job.go--
deleted file mode 100644
index 2ae15162dd7..00000000000
--- a/backend/api/go_http_client/job_model/api_job.go--
+++ /dev/null
@@ -1,224 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-package job_model
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
- "strconv"
-
- strfmt "github.com/go-openapi/strfmt"
-
- "github.com/go-openapi/errors"
- "github.com/go-openapi/swag"
- "github.com/go-openapi/validate"
-)
-
-// APIJob api job
-// swagger:model apiJob
-type APIJob struct {
-
- // Output. The time this job is created.
- // Format: date-time
- CreatedAt strfmt.DateTime `json:"created_at,omitempty"`
-
- // Optional input field. Describing the purpose of the job
- Description string `json:"description,omitempty"`
-
- // Input. Whether the job is enabled or not.
- Enabled bool `json:"enabled,omitempty"`
-
- // In case any error happens retrieving a job field, only job ID
- // and the error message is returned. Client has the flexibility of choosing
- // how to handle error. This is especially useful during listing call.
- Error string `json:"error,omitempty"`
-
- // Output. Unique run ID. Generated by API server.
- ID string `json:"id,omitempty"`
-
- // Required input field.
- // Specify how many runs can be executed concurrently. Rage [1-10]
- MaxConcurrency string `json:"max_concurrency,omitempty"`
-
- // mode
- Mode JobMode `json:"mode,omitempty"`
-
- // Required input field. Job name provided by user. Not unique.
- Name string `json:"name,omitempty"`
-
- // Required input field.
- // Describing what the pipeline manifest and parameters to use
- // for the scheduled job.
- PipelineSpec *APIPipelineSpec `json:"pipeline_spec,omitempty"`
-
- // Optional input field. Specify which resource this run belongs to.
- ResourceReferences []*APIResourceReference `json:"resource_references"`
-
- // Output. The status of the job.
- // One of [Enable, Disable, Error]
- Status string `json:"status,omitempty"`
-
- // Required input field.
- // Specify how a run is triggered. Support cron mode or periodic mode.
- Trigger *APITrigger `json:"trigger,omitempty"`
-
- // Output. The last time this job is updated.
- // Format: date-time
- UpdatedAt strfmt.DateTime `json:"updated_at,omitempty"`
-}
-
-// Validate validates this api job
-func (m *APIJob) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validateCreatedAt(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateMode(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validatePipelineSpec(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateResourceReferences(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateTrigger(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateUpdatedAt(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *APIJob) validateCreatedAt(formats strfmt.Registry) error {
-
- if swag.IsZero(m.CreatedAt) { // not required
- return nil
- }
-
- if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *APIJob) validateMode(formats strfmt.Registry) error {
-
- if swag.IsZero(m.Mode) { // not required
- return nil
- }
-
- if err := m.Mode.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("mode")
- }
- return err
- }
-
- return nil
-}
-
-func (m *APIJob) validatePipelineSpec(formats strfmt.Registry) error {
-
- if swag.IsZero(m.PipelineSpec) { // not required
- return nil
- }
-
- if m.PipelineSpec != nil {
- if err := m.PipelineSpec.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("pipeline_spec")
- }
- return err
- }
- }
-
- return nil
-}
-
-func (m *APIJob) validateResourceReferences(formats strfmt.Registry) error {
-
- if swag.IsZero(m.ResourceReferences) { // not required
- return nil
- }
-
- for i := 0; i < len(m.ResourceReferences); i++ {
- if swag.IsZero(m.ResourceReferences[i]) { // not required
- continue
- }
-
- if m.ResourceReferences[i] != nil {
- if err := m.ResourceReferences[i].Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("resource_references" + "." + strconv.Itoa(i))
- }
- return err
- }
- }
-
- }
-
- return nil
-}
-
-func (m *APIJob) validateTrigger(formats strfmt.Registry) error {
-
- if swag.IsZero(m.Trigger) { // not required
- return nil
- }
-
- if m.Trigger != nil {
- if err := m.Trigger.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("trigger")
- }
- return err
- }
- }
-
- return nil
-}
-
-func (m *APIJob) validateUpdatedAt(formats strfmt.Registry) error {
-
- if swag.IsZero(m.UpdatedAt) { // not required
- return nil
- }
-
- if err := validate.FormatOf("updated_at", "body", "date-time", m.UpdatedAt.String(), formats); err != nil {
- return err
- }
-
- return nil
-}
-
-// MarshalBinary interface implementation
-func (m *APIJob) MarshalBinary() ([]byte, error) {
- if m == nil {
- return nil, nil
- }
- return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *APIJob) UnmarshalBinary(b []byte) error {
- var res APIJob
- if err := swag.ReadJSON(b, &res); err != nil {
- return err
- }
- *m = res
- return nil
-}
diff --git a/backend/api/go_http_client/job_model/api_periodic_schedule.go-- b/backend/api/go_http_client/job_model/api_periodic_schedule.go--
deleted file mode 100644
index 015fa577ec7..00000000000
--- a/backend/api/go_http_client/job_model/api_periodic_schedule.go--
+++ /dev/null
@@ -1,92 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-package job_model
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
- strfmt "github.com/go-openapi/strfmt"
-
- "github.com/go-openapi/errors"
- "github.com/go-openapi/swag"
- "github.com/go-openapi/validate"
-)
-
-// APIPeriodicSchedule PeriodicSchedule allow scheduling the job periodically with certain interval
-// swagger:model apiPeriodicSchedule
-type APIPeriodicSchedule struct {
-
- // The end time of the periodic job
- // Format: date-time
- EndTime strfmt.DateTime `json:"end_time,omitempty"`
-
- // The time interval between the starting time of consecutive jobs
- IntervalSecond string `json:"interval_second,omitempty"`
-
- // The start time of the periodic job
- // Format: date-time
- StartTime strfmt.DateTime `json:"start_time,omitempty"`
-}
-
-// Validate validates this api periodic schedule
-func (m *APIPeriodicSchedule) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validateEndTime(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateStartTime(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *APIPeriodicSchedule) validateEndTime(formats strfmt.Registry) error {
-
- if swag.IsZero(m.EndTime) { // not required
- return nil
- }
-
- if err := validate.FormatOf("end_time", "body", "date-time", m.EndTime.String(), formats); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *APIPeriodicSchedule) validateStartTime(formats strfmt.Registry) error {
-
- if swag.IsZero(m.StartTime) { // not required
- return nil
- }
-
- if err := validate.FormatOf("start_time", "body", "date-time", m.StartTime.String(), formats); err != nil {
- return err
- }
-
- return nil
-}
-
-// MarshalBinary interface implementation
-func (m *APIPeriodicSchedule) MarshalBinary() ([]byte, error) {
- if m == nil {
- return nil, nil
- }
- return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *APIPeriodicSchedule) UnmarshalBinary(b []byte) error {
- var res APIPeriodicSchedule
- if err := swag.ReadJSON(b, &res); err != nil {
- return err
- }
- *m = res
- return nil
-}
diff --git a/backend/api/go_http_client/job_model/api_resource_type.go b/backend/api/go_http_client/job_model/api_resource_type.go
index cc2493d0627..655d4f4c3e5 100644
--- a/backend/api/go_http_client/job_model/api_resource_type.go
+++ b/backend/api/go_http_client/job_model/api_resource_type.go
@@ -48,6 +48,9 @@ const (
// APIResourceTypePIPELINEVERSION captures enum value "PIPELINE_VERSION"
APIResourceTypePIPELINEVERSION APIResourceType = "PIPELINE_VERSION"
+
+ // APIResourceTypeNAMESPACE captures enum value "NAMESPACE"
+ APIResourceTypeNAMESPACE APIResourceType = "NAMESPACE"
)
// for schema
@@ -55,7 +58,7 @@ var apiResourceTypeEnum []interface{}
func init() {
var res []APIResourceType
- if err := json.Unmarshal([]byte(`["UNKNOWN_RESOURCE_TYPE","EXPERIMENT","JOB","PIPELINE","PIPELINE_VERSION"]`), &res); err != nil {
+ if err := json.Unmarshal([]byte(`["UNKNOWN_RESOURCE_TYPE","EXPERIMENT","JOB","PIPELINE","PIPELINE_VERSION","NAMESPACE"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
diff --git a/backend/api/go_http_client/pipeline_model/api_resource_type.go b/backend/api/go_http_client/pipeline_model/api_resource_type.go
index 363e3e9b21d..8f5fb0f47ae 100644
--- a/backend/api/go_http_client/pipeline_model/api_resource_type.go
+++ b/backend/api/go_http_client/pipeline_model/api_resource_type.go
@@ -48,6 +48,9 @@ const (
// APIResourceTypePIPELINEVERSION captures enum value "PIPELINE_VERSION"
APIResourceTypePIPELINEVERSION APIResourceType = "PIPELINE_VERSION"
+
+ // APIResourceTypeNAMESPACE captures enum value "NAMESPACE"
+ APIResourceTypeNAMESPACE APIResourceType = "NAMESPACE"
)
// for schema
@@ -55,7 +58,7 @@ var apiResourceTypeEnum []interface{}
func init() {
var res []APIResourceType
- if err := json.Unmarshal([]byte(`["UNKNOWN_RESOURCE_TYPE","EXPERIMENT","JOB","PIPELINE","PIPELINE_VERSION"]`), &res); err != nil {
+ if err := json.Unmarshal([]byte(`["UNKNOWN_RESOURCE_TYPE","EXPERIMENT","JOB","PIPELINE","PIPELINE_VERSION","NAMESPACE"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
diff --git a/backend/api/go_http_client/run_model/api_resource_type.go b/backend/api/go_http_client/run_model/api_resource_type.go
index 7e9c02d7db7..2748aa0874c 100644
--- a/backend/api/go_http_client/run_model/api_resource_type.go
+++ b/backend/api/go_http_client/run_model/api_resource_type.go
@@ -48,6 +48,9 @@ const (
// APIResourceTypePIPELINEVERSION captures enum value "PIPELINE_VERSION"
APIResourceTypePIPELINEVERSION APIResourceType = "PIPELINE_VERSION"
+
+ // APIResourceTypeNAMESPACE captures enum value "NAMESPACE"
+ APIResourceTypeNAMESPACE APIResourceType = "NAMESPACE"
)
// for schema
@@ -55,7 +58,7 @@ var apiResourceTypeEnum []interface{}
func init() {
var res []APIResourceType
- if err := json.Unmarshal([]byte(`["UNKNOWN_RESOURCE_TYPE","EXPERIMENT","JOB","PIPELINE","PIPELINE_VERSION"]`), &res); err != nil {
+ if err := json.Unmarshal([]byte(`["UNKNOWN_RESOURCE_TYPE","EXPERIMENT","JOB","PIPELINE","PIPELINE_VERSION","NAMESPACE"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
diff --git a/backend/api/resource_reference.proto b/backend/api/resource_reference.proto
index f1ba7d70b42..27b474f2d85 100644
--- a/backend/api/resource_reference.proto
+++ b/backend/api/resource_reference.proto
@@ -23,6 +23,7 @@ enum ResourceType {
JOB = 2;
PIPELINE = 3;
PIPELINE_VERSION = 4;
+ NAMESPACE = 5;
}
enum Relationship {
diff --git a/backend/api/swagger/job.swagger.json b/backend/api/swagger/job.swagger.json
index 2a485f7a4cd..1a764619e56 100644
--- a/backend/api/swagger/job.swagger.json
+++ b/backend/api/swagger/job.swagger.json
@@ -65,7 +65,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
@@ -466,7 +467,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
diff --git a/backend/api/swagger/kfp_api_single_file.swagger.json b/backend/api/swagger/kfp_api_single_file.swagger.json
index c5d9db2982e..93e486eba76 100644
--- a/backend/api/swagger/kfp_api_single_file.swagger.json
+++ b/backend/api/swagger/kfp_api_single_file.swagger.json
@@ -2,7 +2,7 @@
"swagger": "2.0",
"info": {
"title": "Kubeflow Pipelines API",
- "version": "0.1.20",
+ "version": "0.1.38",
"description": "This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition."
},
"schemes": [
@@ -66,7 +66,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
@@ -443,7 +444,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
@@ -654,7 +656,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
@@ -1385,7 +1388,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
diff --git a/backend/api/swagger/pipeline.swagger.json b/backend/api/swagger/pipeline.swagger.json
index 7b175b1e0c5..4f9315f133f 100644
--- a/backend/api/swagger/pipeline.swagger.json
+++ b/backend/api/swagger/pipeline.swagger.json
@@ -44,7 +44,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
@@ -565,7 +566,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
diff --git a/backend/api/swagger/run.swagger.json b/backend/api/swagger/run.swagger.json
index 77df492339e..c1fb22c9a9c 100644
--- a/backend/api/swagger/run.swagger.json
+++ b/backend/api/swagger/run.swagger.json
@@ -65,7 +65,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
@@ -597,7 +598,8 @@
"EXPERIMENT",
"JOB",
"PIPELINE",
- "PIPELINE_VERSION"
+ "PIPELINE_VERSION",
+ "NAMESPACE"
],
"default": "UNKNOWN_RESOURCE_TYPE"
},
diff --git a/backend/metadata_writer/Dockerfile b/backend/metadata_writer/Dockerfile
new file mode 100644
index 00000000000..d9b190fe488
--- /dev/null
+++ b/backend/metadata_writer/Dockerfile
@@ -0,0 +1,8 @@
+# ml-metadata package depends on tensorflow package
+FROM python:3.7
+RUN python3 -m pip install 'kubernetes>=8.0.0,<11.0.0' 'ml-metadata==0.15.2' pyyaml --upgrade --quiet
+COPY components/license.sh components/third_party_licenses.csv /kfp/metadata_writer/
+RUN mkdir /usr/licenses && /kfp/metadata_writer/license.sh /kfp/metadata_writer/third_party_licenses.csv /usr/licenses
+
+COPY backend/metadata_writer/src/* /kfp/metadata_writer/
+CMD python3 -u /kfp/metadata_writer/metadata_writer.py
diff --git a/backend/metadata_writer/OWNERS b/backend/metadata_writer/OWNERS
new file mode 100644
index 00000000000..898f657f90b
--- /dev/null
+++ b/backend/metadata_writer/OWNERS
@@ -0,0 +1,4 @@
+approvers:
+ - Ark-kun
+reviewers:
+ - Ark-kun
diff --git a/backend/metadata_writer/src/metadata_helpers.py b/backend/metadata_writer/src/metadata_helpers.py
new file mode 100644
index 00000000000..805a7f85a69
--- /dev/null
+++ b/backend/metadata_writer/src/metadata_helpers.py
@@ -0,0 +1,385 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import ml_metadata
+from time import sleep
+from ml_metadata.proto import metadata_store_pb2
+from ml_metadata.metadata_store import metadata_store
+
+
+def connect_to_mlmd() -> metadata_store.MetadataStore:
+ metadata_service_host = os.environ.get('METADATA_SERVICE_SERVICE_HOST', 'metadata-service')
+ metadata_service_port = int(os.environ.get('METADATA_SERVICE_SERVICE_PORT', 8080))
+
+ mlmd_connection_config = metadata_store_pb2.MetadataStoreClientConfig(
+ host=metadata_service_host,
+ port=metadata_service_port,
+ )
+
+ # Checking the connection to the Metadata store.
+ for _ in range(100):
+ try:
+ mlmd_store = metadata_store.MetadataStore(mlmd_connection_config)
+ # All get requests fail when the DB is empty, so we have to use a put request.
+ # TODO: Replace with _ = mlmd_store.get_context_types() when https://github.com/google/ml-metadata/issues/28 is fixed
+ _ = mlmd_store.put_execution_type(
+ metadata_store_pb2.ExecutionType(
+ name="DummyExecutionType",
+ )
+ )
+ return mlmd_store
+ except Exception as e:
+ print('Failed to access the Metadata store. Exception: "{}"'.format(str(e)), file=sys.stderr)
+ sys.stderr.flush()
+ sleep(1)
+
+ raise RuntimeError('Could not connect to the Metadata store.')
+
+
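+# The get_or_create_*_type helpers below look up a type by name and register a new one when the lookup fails.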
+def get_or_create_artifact_type(store, type_name, properties: dict = None) -> metadata_store_pb2.ArtifactType:
+ try:
+ artifact_type = store.get_artifact_type(type_name=type_name)
+ return artifact_type
+ except:
+ artifact_type = metadata_store_pb2.ArtifactType(
+ name=type_name,
+ properties=properties,
+ )
+ artifact_type.id = store.put_artifact_type(artifact_type) # Returns ID
+ return artifact_type
+
+
+def get_or_create_execution_type(store, type_name, properties: dict = None) -> metadata_store_pb2.ExecutionType:
+ try:
+ execution_type = store.get_execution_type(type_name=type_name)
+ return execution_type
+ except:
+ execution_type = metadata_store_pb2.ExecutionType(
+ name=type_name,
+ properties=properties,
+ )
+ execution_type.id = store.put_execution_type(execution_type) # Returns ID
+ return execution_type
+
+
+def get_or_create_context_type(store, type_name, properties: dict = None) -> metadata_store_pb2.ContextType:
+ try:
+ context_type = store.get_context_type(type_name=type_name)
+ return context_type
+ except:
+ context_type = metadata_store_pb2.ContextType(
+ name=type_name,
+ properties=properties,
+ )
+ context_type.id = store.put_context_type(context_type) # Returns ID
+ return context_type
+
+
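+# The create_*_with_type helpers register the type if needed, then store a new artifact/execution/context and return it with the ID assigned by MLMD.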
+def create_artifact_with_type(
+ store,
+ uri: str,
+ type_name: str,
+ properties: dict = None,
+ type_properties: dict = None,
+) -> metadata_store_pb2.Artifact:
+ artifact_type = get_or_create_artifact_type(
+ store=store,
+ type_name=type_name,
+ properties=type_properties,
+ )
+ artifact = metadata_store_pb2.Artifact(
+ uri=uri,
+ type_id=artifact_type.id,
+ properties=properties,
+ )
+ artifact.id = store.put_artifacts([artifact])[0]
+ return artifact
+
+
+def create_execution_with_type(
+ store,
+ type_name: str,
+ properties: dict = None,
+ type_properties: dict = None,
+) -> metadata_store_pb2.Execution:
+ execution_type = get_or_create_execution_type(
+ store=store,
+ type_name=type_name,
+ properties=type_properties,
+ )
+ execution = metadata_store_pb2.Execution(
+ type_id=execution_type.id,
+ properties=properties,
+ )
+ execution.id = store.put_executions([execution])[0]
+ return execution
+
+
+def create_context_with_type(
+ store,
+ context_name: str,
+ type_name: str,
+ properties: dict = None,
+ type_properties: dict = None,
+) -> metadata_store_pb2.Context:
+ # Note: context_name must be unique
+ context_type = get_or_create_context_type(
+ store=store,
+ type_name=type_name,
+ properties=type_properties,
+ )
+ context = metadata_store_pb2.Context(
+ name=context_name,
+ type_id=context_type.id,
+ properties=properties,
+ )
+ context.id = store.put_contexts([context])[0]
+ return context
+
+
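+# get_context_by_name scans all contexts in the store; lru_cache memoizes the result so repeated lookups of the same context name do not rescan the store.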
+import functools
+@functools.lru_cache(maxsize=128)
+def get_context_by_name(
+ store,
+ context_name: str,
+) -> metadata_store_pb2.Context:
+ matching_contexts = [context for context in store.get_contexts() if context.name == context_name]
+ assert len(matching_contexts) <= 1
+ if len(matching_contexts) == 0:
+ raise ValueError('Context with name "{}" was not found'.format(context_name))
+ return matching_contexts[0]
+
+
+def get_or_create_context_with_type(
+ store,
+ context_name: str,
+ type_name: str,
+ properties: dict = None,
+ type_properties: dict = None,
+) -> metadata_store_pb2.Context:
+ try:
+ context = get_context_by_name(store, context_name)
+ except:
+ context = create_context_with_type(
+ store=store,
+ context_name=context_name,
+ type_name=type_name,
+ properties=properties,
+ type_properties=type_properties,
+ )
+ return context
+
+ # Verifying that the context has the expected type name
+ context_types = store.get_context_types_by_id([context.type_id])
+ assert len(context_types) == 1
+ if context_types[0].name != type_name:
+ raise RuntimeError('Context "{}" was found, but it has type "{}" instead of "{}"'.format(context_name, context_types[0].name, type_name))
+ return context
+
+
+def create_new_execution_in_existing_context(
+ store,
+ execution_type_name: str,
+ context_id: int,
+ properties: dict = None,
+ execution_type_properties: dict = None,
+) -> metadata_store_pb2.Execution:
+ execution = create_execution_with_type(
+ store=store,
+ properties=properties,
+ type_name=execution_type_name,
+ type_properties=execution_type_properties,
+ )
+ association = metadata_store_pb2.Association(
+ execution_id=execution.id,
+ context_id=context_id,
+ )
+
+ store.put_attributions_and_associations([], [association])
+ return execution
+
+
+RUN_CONTEXT_TYPE_NAME = "KfpRun"
+KFP_EXECUTION_TYPE_NAME_PREFIX = 'components.'
+
+ARTIFACT_IO_NAME_PROPERTY_NAME = "name"
+EXECUTION_COMPONENT_ID_PROPERTY_NAME = "component_id"# ~= Task ID
+
+#TODO: Get rid of these when https://github.com/tensorflow/tfx/issues/905 and https://github.com/kubeflow/pipelines/issues/2562 are fixed
+ARTIFACT_PIPELINE_NAME_PROPERTY_NAME = "pipeline_name"
+EXECUTION_PIPELINE_NAME_PROPERTY_NAME = "pipeline_name"
+CONTEXT_PIPELINE_NAME_PROPERTY_NAME = "pipeline_name"
+ARTIFACT_RUN_ID_PROPERTY_NAME = "run_id"
+EXECUTION_RUN_ID_PROPERTY_NAME = "run_id"
+CONTEXT_RUN_ID_PROPERTY_NAME = "run_id"
+
+
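+# Each pipeline run gets its own MLMD context of type KfpRun, keyed by the run ID, so the run's executions and artifacts can be grouped together.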
+def get_or_create_run_context(
+ store,
+ run_id: str,
+) -> metadata_store_pb2.Context:
+ context = get_or_create_context_with_type(
+ store=store,
+ context_name=run_id,
+ type_name=RUN_CONTEXT_TYPE_NAME,
+ type_properties={
+ CONTEXT_PIPELINE_NAME_PROPERTY_NAME: metadata_store_pb2.STRING,
+ CONTEXT_RUN_ID_PROPERTY_NAME: metadata_store_pb2.STRING,
+ },
+ properties={
+ CONTEXT_PIPELINE_NAME_PROPERTY_NAME: metadata_store_pb2.Value(string_value=run_id),
+ CONTEXT_RUN_ID_PROPERTY_NAME: metadata_store_pb2.Value(string_value=run_id),
+ },
+ )
+ return context
+
+
+def create_new_execution_in_existing_run_context(
+ store,
+ execution_type_name: str,
+ context_id: int,
+ # TODO: Remove when UX stops relying on these properties
+ pipeline_name: str = None,
+ run_id: str = None,
+ instance_id: str = None,
+) -> metadata_store_pb2.Execution:
+ pipeline_name = pipeline_name or 'Context_' + str(context_id) + '_pipeline'
+ run_id = run_id or 'Context_' + str(context_id) + '_run'
+ instance_id = instance_id or execution_type_name
+ return create_new_execution_in_existing_context(
+ store=store,
+ execution_type_name=execution_type_name,
+ context_id=context_id,
+ execution_type_properties={
+ EXECUTION_PIPELINE_NAME_PROPERTY_NAME: metadata_store_pb2.STRING,
+ EXECUTION_RUN_ID_PROPERTY_NAME: metadata_store_pb2.STRING,
+ EXECUTION_COMPONENT_ID_PROPERTY_NAME: metadata_store_pb2.STRING,
+ },
+ # TODO: Remove when UX stops relying on these properties
+ properties={
+ EXECUTION_PIPELINE_NAME_PROPERTY_NAME: metadata_store_pb2.Value(string_value=pipeline_name), # Mistakenly used for grouping in the UX
+ EXECUTION_RUN_ID_PROPERTY_NAME: metadata_store_pb2.Value(string_value=run_id),
+ EXECUTION_COMPONENT_ID_PROPERTY_NAME: metadata_store_pb2.Value(string_value=instance_id), # should be set to the task ID, not the component ID
+ },
+ )
+
+
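+# Creates the artifact, links it to the execution via an Event of the given type, and attributes it to the context.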
+def create_new_artifact_event_and_attribution(
+ store,
+ execution_id: int,
+ context_id: int,
+ uri: str,
+ type_name: str,
+ event_type: metadata_store_pb2.Event.Type,
+ properties: dict = None,
+ artifact_type_properties: dict = None,
+ artifact_name_path: metadata_store_pb2.Event.Path = None,
+ milliseconds_since_epoch: int = None,
+) -> metadata_store_pb2.Artifact:
+ artifact = create_artifact_with_type(
+ store=store,
+ uri=uri,
+ type_name=type_name,
+ type_properties=artifact_type_properties,
+ properties=properties,
+ )
+ event = metadata_store_pb2.Event(
+ execution_id=execution_id,
+ artifact_id=artifact.id,
+ type=event_type,
+ path=artifact_name_path,
+ milliseconds_since_epoch=milliseconds_since_epoch,
+ )
+ store.put_events([event])
+
+ attribution = metadata_store_pb2.Attribution(
+ context_id=context_id,
+ artifact_id=artifact.id,
+ )
+ store.put_attributions_and_associations([attribution], [])
+
+ return artifact
+
+
+def link_execution_to_input_artifact(
+ store,
+ execution_id: int,
+ uri: str,
+ input_name: str,
+) -> metadata_store_pb2.Artifact:
+ artifacts = store.get_artifacts_by_uri(uri)
+ if len(artifacts) == 0:
+ print('Error: Upstream artifact with URI={} was not found.'.format(uri), file=sys.stderr)
+ return None
+ if len(artifacts) > 1:
+ print('Error: Found multiple artifacts with the same URI: {}. Using the last one.'.format(artifacts), file=sys.stderr)
+
+ artifact = artifacts[-1]
+
+ event = metadata_store_pb2.Event(
+ execution_id=execution_id,
+ artifact_id=artifact.id,
+ type=metadata_store_pb2.Event.INPUT,
+ path=metadata_store_pb2.Event.Path(
+ steps=[
+ metadata_store_pb2.Event.Path.Step(
+ key=input_name,
+ ),
+ ]
+ ),
+ )
+ store.put_events([event])
+ return artifact
+
+
+def create_new_output_artifact(
+ store,
+ execution_id: int,
+ context_id: int,
+ uri: str,
+ type_name: str,
+ output_name: str,
+ run_id: str = None,
+) -> metadata_store_pb2.Artifact:
+ properties = {
+ ARTIFACT_IO_NAME_PROPERTY_NAME: metadata_store_pb2.Value(string_value=output_name),
+ }
+ if run_id:
+ properties[ARTIFACT_PIPELINE_NAME_PROPERTY_NAME] = metadata_store_pb2.Value(string_value=str(run_id))
+ properties[ARTIFACT_RUN_ID_PROPERTY_NAME] = metadata_store_pb2.Value(string_value=str(run_id))
+ return create_new_artifact_event_and_attribution(
+ store=store,
+ execution_id=execution_id,
+ context_id=context_id,
+ uri=uri,
+ type_name=type_name,
+ event_type=metadata_store_pb2.Event.OUTPUT,
+ artifact_name_path=metadata_store_pb2.Event.Path(
+ steps=[
+ metadata_store_pb2.Event.Path.Step(
+ key=output_name,
+ #index=0,
+ ),
+ ]
+ ),
+ properties=properties,
+ artifact_type_properties={
+ ARTIFACT_IO_NAME_PROPERTY_NAME: metadata_store_pb2.STRING,
+ ARTIFACT_PIPELINE_NAME_PROPERTY_NAME: metadata_store_pb2.STRING,
+ ARTIFACT_RUN_ID_PROPERTY_NAME: metadata_store_pb2.STRING,
+ },
+ #milliseconds_since_epoch=int(datetime.now(timezone.utc).timestamp() * 1000), # Happens automatically
+ )
diff --git a/backend/metadata_writer/src/metadata_writer.py b/backend/metadata_writer/src/metadata_writer.py
new file mode 100644
index 00000000000..816cfb0bcfd
--- /dev/null
+++ b/backend/metadata_writer/src/metadata_writer.py
@@ -0,0 +1,323 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import hashlib
+import os
+import sys
+import kubernetes
+import yaml
+from time import sleep
+
+from metadata_helpers import *
+
+
+namespace_to_watch = os.environ.get('NAMESPACE_TO_WATCH', 'default')
+
+
+kubernetes.config.load_incluster_config()
+k8s_api = kubernetes.client.CoreV1Api()
+k8s_watch = kubernetes.watch.Watch()
+
+
+patch_retries = 20
+sleep_time = 0.1
+
+
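+# Patches a pod's metadata (e.g. labels or annotations), retrying up to patch_retries times with sleep_time seconds between attempts.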
+def patch_pod_metadata(
+ namespace: str,
+ pod_name: str,
+ patch: dict,
+ k8s_api: kubernetes.client.CoreV1Api = None,
+):
+ k8s_api = k8s_api or kubernetes.client.CoreV1Api()
+ patch = {
+ 'metadata': patch
+ }
+ for retry in range(patch_retries):
+ try:
+ pod = k8s_api.patch_namespaced_pod(
+ name=pod_name,
+ namespace=namespace,
+ body=patch,
+ )
+ return pod
+ except Exception as e:
+ print(e)
+ sleep(sleep_time)
+
+
+# Connecting to the metadata store (MLMD)
+mlmd_store = connect_to_mlmd()
+print("Connected to the metadata store")
+
+
+ARGO_OUTPUTS_ANNOTATION_KEY = 'workflows.argoproj.io/outputs'
+ARGO_TEMPLATE_ANNOTATION_KEY = 'workflows.argoproj.io/template'
+KFP_COMPONENT_SPEC_ANNOTATION_KEY = 'pipelines.kubeflow.org/component_spec'
+METADATA_EXECUTION_ID_LABEL_KEY = 'pipelines.kubeflow.org/metadata_execution_id'
+METADATA_CONTEXT_ID_LABEL_KEY = 'pipelines.kubeflow.org/metadata_context_id'
+METADATA_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_artifact_ids'
+METADATA_INPUT_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_input_artifact_ids'
+METADATA_OUTPUT_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_output_artifact_ids'
+
+ARGO_WORKFLOW_LABEL_KEY = 'workflows.argoproj.io/workflow'
+ARGO_COMPLETED_LABEL_KEY = 'workflows.argoproj.io/completed'
+METADATA_WRITTEN_LABEL_KEY = 'pipelines.kubeflow.org/metadata_written'
+
+
+def output_name_to_argo(name: str) -> str:
+ import re
+ return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-', name.lower())).strip('-')
+
+
+def argo_artifact_to_uri(artifact: dict) -> str:
+ if 's3' in artifact:
+ s3_artifact = artifact['s3']
+ return 'minio://{bucket}/{key}'.format(
+ bucket=s3_artifact.get('bucket', ''),
+ key=s3_artifact.get('key', ''),
+ )
+ elif 'raw' in artifact:
+ return None
+ else:
+ return None
+
+
+def is_tfx_pod(pod) -> bool:
+ main_containers = [container for container in pod.spec.containers if container.name == 'main']
+ if len(main_containers) != 1:
+ return False
+ main_container = main_containers[0]
+ return main_container.command and main_container.command[-1].endswith('tfx/orchestration/kubeflow/container_entrypoint.py')
+
+
+# Caches (not expected to be persistent)
+# These caches are only used to prevent race conditions, which can happen because the writer may see multiple versions of a K8s object before the labels it applied show up.
+# They are expected to be lost when the service restarts.
+# The Metadata Writer remains correct even if it is restarted frequently. (Kubernetes only sends the latest version of a resource to new watchers.)
+# Technically, we could remove the objects from cache as soon as we see that our labels have been applied successfully.
+pod_name_to_execution_id = {}
+workflow_name_to_context_id = {}
+pods_with_written_metadata = set()
+
+while True:
+ print("Start watching Kubernetes Pods created by Argo")
+ for event in k8s_watch.stream(
+ k8s_api.list_namespaced_pod,
+ namespace=namespace_to_watch,
+ label_selector=ARGO_WORKFLOW_LABEL_KEY,
+ ):
+ try:
+ obj = event['object']
+ print('Kubernetes Pod event: ', event['type'], obj.metadata.name, obj.metadata.resource_version)
+ if event['type'] == 'ERROR':
+ print(event)
+
+ # Logging pod changes for debugging
+ with open('/tmp/pod_' + obj.metadata.name + '_' + obj.metadata.resource_version, 'w') as f:
+ f.write(yaml.dump(obj.to_dict()))
+
+ assert obj.kind == 'Pod'
+
+ if METADATA_WRITTEN_LABEL_KEY in obj.metadata.labels:
+ continue
+
+ # Skip TFX pods - they have their own metadata writers
+ if is_tfx_pod(obj):
+ continue
+
+ argo_workflow_name = obj.metadata.labels[ARGO_WORKFLOW_LABEL_KEY] # Should exist due to initial filtering
+ argo_template = json.loads(obj.metadata.annotations[ARGO_TEMPLATE_ANNOTATION_KEY])
+ argo_template_name = argo_template['name']
+
+ component_name = argo_template_name
+ component_version = component_name
+ argo_output_name_to_type = {}
+ if KFP_COMPONENT_SPEC_ANNOTATION_KEY in obj.metadata.annotations:
+ component_spec_text = obj.metadata.annotations[KFP_COMPONENT_SPEC_ANNOTATION_KEY]
+ component_spec = json.loads(component_spec_text)
+ component_spec_digest = hashlib.sha256(component_spec_text.encode()).hexdigest()
+ component_name = component_spec.get('name', component_name)
+ component_version = component_name + '@sha256=' + component_spec_digest
+ output_name_to_type = {output['name']: output.get('type', None) for output in component_spec.get('outputs', [])}
+ argo_output_name_to_type = {output_name_to_argo(k): v for k, v in output_name_to_type.items() if v}
+
+ if obj.metadata.name in pod_name_to_execution_id:
+ execution_id = pod_name_to_execution_id[obj.metadata.name]
+ context_id = workflow_name_to_context_id[argo_workflow_name]
+ elif METADATA_EXECUTION_ID_LABEL_KEY in obj.metadata.labels:
+ execution_id = int(obj.metadata.labels[METADATA_EXECUTION_ID_LABEL_KEY])
+ context_id = int(obj.metadata.labels[METADATA_CONTEXT_ID_LABEL_KEY])
+ print('Found execution id: {}, context id: {} for pod {}.'.format(execution_id, context_id, obj.metadata.name))
+ else:
+ run_context = get_or_create_run_context(
+ store=mlmd_store,
+ run_id=argo_workflow_name, # We can switch to internal run IDs once backend starts adding them
+ )
+
+ # Adding new execution to the database
+ execution = create_new_execution_in_existing_run_context(
+ store=mlmd_store,
+ context_id=run_context.id,
+ execution_type_name=KFP_EXECUTION_TYPE_NAME_PREFIX + component_version,
+ pipeline_name=argo_workflow_name,
+ run_id=argo_workflow_name,
+ instance_id=component_name,
+ )
+
+ argo_input_artifacts = argo_template.get('inputs', {}).get('artifacts', [])
+ input_artifact_ids = []
+ for argo_artifact in argo_input_artifacts:
+ artifact_uri = argo_artifact_to_uri(argo_artifact)
+ if not artifact_uri:
+ continue
+
+ input_name = argo_artifact.get('path', '') # Every artifact should have a path in Argo
+ input_artifact_path_prefix = '/tmp/inputs/'
+ input_artifact_path_postfix = '/data'
+ if input_name.startswith(input_artifact_path_prefix):
+ input_name = input_name[len(input_artifact_path_prefix):]
+ if input_name.endswith(input_artifact_path_postfix):
+ input_name = input_name[0: -len(input_artifact_path_postfix)]
+
+ artifact = link_execution_to_input_artifact(
+ store=mlmd_store,
+ execution_id=execution.id,
+ uri=artifact_uri,
+ input_name=input_name,
+ )
+ if artifact is None:
+ # TODO: Maybe there is a better way to handle missing upstream artifacts
+ continue
+
+ input_artifact_ids.append(dict(
+ id=artifact.id,
+ name=input_name,
+ uri=artifact.uri,
+ ))
+ print('Found Input Artifact: ' + str(dict(
+ input_name=input_name,
+ id=artifact.id,
+ uri=artifact.uri,
+ )))
+
+ execution_id = execution.id
+ context_id = run_context.id
+
+ obj.metadata.labels[METADATA_EXECUTION_ID_LABEL_KEY] = execution_id
+ obj.metadata.labels[METADATA_CONTEXT_ID_LABEL_KEY] = context_id
+
+ metadata_to_add = {
+ 'labels': {
+ METADATA_EXECUTION_ID_LABEL_KEY: str(execution_id),
+ METADATA_CONTEXT_ID_LABEL_KEY: str(context_id),
+ },
+ 'annotations': {
+ METADATA_INPUT_ARTIFACT_IDS_ANNOTATION_KEY: json.dumps(input_artifact_ids),
+ },
+ }
+
+ patch_pod_metadata(
+ namespace=obj.metadata.namespace,
+ pod_name=obj.metadata.name,
+ patch=metadata_to_add,
+ )
+ pod_name_to_execution_id[obj.metadata.name] = execution_id
+ workflow_name_to_context_id[argo_workflow_name] = context_id
+
+ print('New execution id: {}, context id: {} for pod {}.'.format(execution_id, context_id, obj.metadata.name))
+
+ print('Execution: ' + str(dict(
+ context_id=context_id,
+ context_name=argo_workflow_name,
+ execution_id=execution_id,
+ execution_name=obj.metadata.name,
+ component_name=component_name,
+ )))
+
+ # TODO: Log input parameters as execution options.
+ # Unfortunately, DSL compiler loses the information about inputs and their arguments.
+
+ if (
+ obj.metadata.name not in pods_with_written_metadata
+ and (
+ obj.metadata.labels.get(ARGO_COMPLETED_LABEL_KEY, 'false') == 'true'
+ or ARGO_OUTPUTS_ANNOTATION_KEY in obj.metadata.annotations
+ )
+ ):
+ artifact_ids = []
+
+ if ARGO_OUTPUTS_ANNOTATION_KEY in obj.metadata.annotations: # Should be present
+ argo_outputs = json.loads(obj.metadata.annotations[ARGO_OUTPUTS_ANNOTATION_KEY])
+ argo_output_artifacts = {}
+
+ for artifact in argo_outputs.get('artifacts', []):
+ art_name = artifact['name']
+ output_prefix = argo_template_name + '-'
+ if art_name.startswith(output_prefix):
+ art_name = art_name[len(output_prefix):]
+ argo_output_artifacts[art_name] = artifact
+
+ output_artifacts = []
+ for name, art in argo_output_artifacts.items():
+ artifact_uri = argo_artifact_to_uri(art)
+ if not artifact_uri:
+ continue
+ artifact_type_name = argo_output_name_to_type.get(name, 'NoType') # Cannot be None or ''
+
+ print('Adding Output Artifact: ' + str(dict(
+ output_name=name,
+ uri=artifact_uri,
+ type=artifact_type_name,
+ )))
+
+ artifact = create_new_output_artifact(
+ store=mlmd_store,
+ execution_id=execution_id,
+ context_id=context_id,
+ uri=artifact_uri,
+ type_name=artifact_type_name,
+ output_name=name,
+ #run_id='Context_' + str(context_id) + '_run',
+ run_id=argo_workflow_name,
+ )
+
+ artifact_ids.append(dict(
+ id=artifact.id,
+ name=name,
+ uri=artifact_uri,
+ type=artifact_type_name,
+ ))
+
+ metadata_to_add = {
+ 'labels': {
+ METADATA_WRITTEN_LABEL_KEY: 'true',
+ },
+ 'annotations': {
+ METADATA_OUTPUT_ARTIFACT_IDS_ANNOTATION_KEY: json.dumps(artifact_ids),
+ },
+ }
+
+ patch_pod_metadata(
+ namespace=obj.metadata.namespace,
+ pod_name=obj.metadata.name,
+ patch=metadata_to_add,
+ )
+
+ pods_with_written_metadata.add(obj.metadata.name)
+
+ except Exception as e:
+ import traceback
+ print(traceback.format_exc())
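
The writer records its bookkeeping by patching the pod's metadata: labels for the execution and context IDs, annotations for the artifact IDs. As a rough illustration of the patch body it sends, here is a small Go sketch (Go, since the rest of the backend code in this PR is Go) that only builds and prints an equivalent merge-patch document; the IDs and URI are invented, and only the label/annotation keys mirror the constants defined in metadata_writer.py.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Shape of the metadata patch the writer sends once a pod completes.
	// The IDs and URI below are placeholders.
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": map[string]string{
				"pipelines.kubeflow.org/metadata_execution_id": "42",
				"pipelines.kubeflow.org/metadata_context_id":   "7",
				"pipelines.kubeflow.org/metadata_written":      "true",
			},
			"annotations": map[string]string{
				"pipelines.kubeflow.org/metadata_output_artifact_ids": `[{"id": 101, "name": "output", "uri": "minio://bucket/key"}]`,
			},
		},
	}
	body, err := json.MarshalIndent(patch, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```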
diff --git a/backend/src/apiserver/BUILD.bazel b/backend/src/apiserver/BUILD.bazel
index eaa254f7a64..beb2b7a3e50 100644
--- a/backend/src/apiserver/BUILD.bazel
+++ b/backend/src/apiserver/BUILD.bazel
@@ -19,7 +19,6 @@ go_library(
"//backend/src/apiserver/storage:go_default_library",
"//backend/src/common/util:go_default_library",
"//backend/src/crd/pkg/client/clientset/versioned/typed/scheduledworkflow/v1beta1:go_default_library",
- "@com_github_argoproj_argo//pkg/client/clientset/versioned/typed/workflow/v1alpha1:go_default_library",
"@com_github_cenkalti_backoff//:go_default_library",
"@com_github_fsnotify_fsnotify//:go_default_library",
"@com_github_golang_glog//:go_default_library",
diff --git a/backend/src/apiserver/client/BUILD.bazel b/backend/src/apiserver/client/BUILD.bazel
index 25f6be6c30d..7a8a07a76f6 100644
--- a/backend/src/apiserver/client/BUILD.bazel
+++ b/backend/src/apiserver/client/BUILD.bazel
@@ -1,19 +1,28 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
+ "argo.go",
+ "argo_fake.go",
+ "kfam.go",
+ "kfam_fake.go",
"minio.go",
- "pod.go",
+ "kubernetes_core.go",
+ "kubernetes_core_fake.go",
+ "pod_fake.go",
"scheduled_workflow.go",
"sql.go",
- "workflow.go",
+ "workflow_fake.go",
],
importpath = "github.com/kubeflow/pipelines/backend/src/apiserver/client",
visibility = ["//visibility:public"],
deps = [
+ "//backend/src/apiserver/common:go_default_library",
+ "//backend/src/common/util:go_default_library",
"//backend/src/crd/pkg/client/clientset/versioned:go_default_library",
"//backend/src/crd/pkg/client/clientset/versioned/typed/scheduledworkflow/v1beta1:go_default_library",
+ "@com_github_argoproj_argo//pkg/apis/workflow/v1alpha1:go_default_library",
"@com_github_argoproj_argo//pkg/client/clientset/versioned:go_default_library",
"@com_github_argoproj_argo//pkg/client/clientset/versioned/typed/workflow/v1alpha1:go_default_library",
"@com_github_cenkalti_backoff//:go_default_library",
@@ -21,8 +30,21 @@ go_library(
"@com_github_golang_glog//:go_default_library",
"@com_github_minio_minio_go//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
+ "@io_k8s_api//core/v1:go_default_library",
+ "@io_k8s_api//policy/v1beta1:go_default_library",
+ "@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
+ "@io_k8s_apimachinery//pkg/types:go_default_library",
+ "@io_k8s_apimachinery//pkg/watch:go_default_library",
"@io_k8s_client_go//kubernetes:go_default_library",
"@io_k8s_client_go//kubernetes/typed/core/v1:go_default_library",
"@io_k8s_client_go//rest:go_default_library",
],
)
+
+go_test(
+ name = "go_default_test",
+ srcs = ["kfam_test.go"],
+ data = glob(["test/**/*"]), # keep
+ embed = [":go_default_library"],
+ deps = ["@com_github_stretchr_testify//assert:go_default_library"],
+)
diff --git a/backend/src/apiserver/client/argo.go b/backend/src/apiserver/client/argo.go
new file mode 100644
index 00000000000..6fe15530fb2
--- /dev/null
+++ b/backend/src/apiserver/client/argo.go
@@ -0,0 +1,67 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "time"
+
+ argoclient "github.com/argoproj/argo/pkg/client/clientset/versioned"
+ argoprojv1alpha1 "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
+ "github.com/cenkalti/backoff"
+ "github.com/golang/glog"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
+ "github.com/pkg/errors"
+ "k8s.io/client-go/rest"
+)
+
+const (
+ PodNamespace = "POD_NAMESPACE"
+)
+
+type ArgoClientInterface interface {
+ Workflow(namespace string) argoprojv1alpha1.WorkflowInterface
+}
+
+type ArgoClient struct {
+ argoProjClient argoprojv1alpha1.ArgoprojV1alpha1Interface
+}
+
+func (argoClient *ArgoClient) Workflow(namespace string) argoprojv1alpha1.WorkflowInterface {
+ if len(namespace) == 0 {
+ namespace = common.GetStringConfig(PodNamespace)
+ }
+ return argoClient.argoProjClient.Workflows(namespace)
+}
+
+func NewArgoClientOrFatal(initConnectionTimeout time.Duration) *ArgoClient {
+ var argoProjClient argoprojv1alpha1.ArgoprojV1alpha1Interface
+ var operation = func() error {
+ restConfig, err := rest.InClusterConfig()
+ if err != nil {
+ return errors.Wrap(err, "Failed to initialize the RestConfig")
+ }
+ argoProjClient = argoclient.NewForConfigOrDie(restConfig).ArgoprojV1alpha1()
+ return nil
+ }
+
+ b := backoff.NewExponentialBackOff()
+ b.MaxElapsedTime = initConnectionTimeout
+ err := backoff.Retry(operation, b)
+
+ if err != nil {
+ glog.Fatalf("Failed to create ArgoClient. Error: %v", err)
+ }
+ return &ArgoClient{argoProjClient}
+}
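
Workflow(namespace) falls back to the API server's own namespace when the caller passes an empty string. A minimal sketch of that fallback pattern, reading the POD_NAMESPACE environment variable directly instead of going through the Viper-backed common.GetStringConfig; the helper name is made up for illustration.

```go
package main

import (
	"fmt"
	"os"
)

// resolveNamespace mirrors the fallback in ArgoClient.Workflow: an empty
// namespace means "use the namespace the API server itself runs in".
func resolveNamespace(requested string) string {
	if len(requested) == 0 {
		return os.Getenv("POD_NAMESPACE")
	}
	return requested
}

func main() {
	os.Setenv("POD_NAMESPACE", "kubeflow")
	fmt.Println(resolveNamespace(""))        // kubeflow
	fmt.Println(resolveNamespace("user-ns")) // user-ns
}
```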
diff --git a/backend/src/apiserver/client/argo_fake.go b/backend/src/apiserver/client/argo_fake.go
new file mode 100644
index 00000000000..597d9715f87
--- /dev/null
+++ b/backend/src/apiserver/client/argo_fake.go
@@ -0,0 +1,70 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ argoprojv1alpha1 "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
+ "github.com/pkg/errors"
+)
+
+type FakeArgoClient struct {
+ workflowClientFake *FakeWorkflowClient
+}
+
+func NewFakeArgoClient() *FakeArgoClient {
+ return &FakeArgoClient{NewWorkflowClientFake()}
+}
+
+func (c *FakeArgoClient) Workflow(namespace string) argoprojv1alpha1.WorkflowInterface {
+ return c.workflowClientFake
+}
+
+func (c *FakeArgoClient) GetWorkflowCount() int {
+ return len(c.workflowClientFake.workflows)
+}
+
+func (c *FakeArgoClient) GetWorkflowKeys() map[string]bool {
+ result := map[string]bool{}
+ for key := range c.workflowClientFake.workflows {
+ result[key] = true
+ }
+ return result
+}
+
+func (c *FakeArgoClient) IsTerminated(name string) (bool, error) {
+ workflow, ok := c.workflowClientFake.workflows[name]
+ if !ok {
+ return false, errors.New("No workflow found with name: " + name)
+ }
+
+ activeDeadlineSeconds := workflow.Spec.ActiveDeadlineSeconds
+ if activeDeadlineSeconds == nil {
+ return false, errors.New("No ActiveDeadlineSeconds found in workflow with name: " + name)
+ }
+
+ return *activeDeadlineSeconds == 0, nil
+}
+
+type FakeArgoClientWithBadWorkflow struct {
+ workflowClientFake *FakeBadWorkflowClient
+}
+
+func NewFakeArgoClientWithBadWorkflow() *FakeArgoClientWithBadWorkflow {
+ return &FakeArgoClientWithBadWorkflow{&FakeBadWorkflowClient{}}
+}
+
+func (c *FakeArgoClientWithBadWorkflow) Workflow(namespace string) argoprojv1alpha1.WorkflowInterface {
+ return c.workflowClientFake
+}
diff --git a/backend/src/apiserver/client/kfam.go b/backend/src/apiserver/client/kfam.go
new file mode 100644
index 00000000000..e47bf70378a
--- /dev/null
+++ b/backend/src/apiserver/client/kfam.go
@@ -0,0 +1,100 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/kubeflow/pipelines/backend/src/common/util"
+ "github.com/pkg/errors"
+)
+
+type KFAMClientInterface interface {
+ IsAuthorized(userIdentity string, namespace string) (bool, error)
+}
+
+type KFAMClient struct {
+ kfamServiceUrl string
+}
+
+type User struct {
+ Kind string
+ Name string
+}
+
+type RoleRef struct {
+ ApiGroup string
+ Kind string
+ Name string
+}
+
+type Binding struct {
+ User User
+ ReferredNamespace string
+ RoleRef RoleRef
+}
+
+type Bindings struct {
+ Bindings []Binding
+}
+
+const (
+ HTTP_TIMEOUT_SECONDS = 10
+)
+
+func (c *KFAMClient) IsAuthorized(userIdentity string, namespace string) (bool, error) {
+ req, err := http.NewRequest("GET", c.kfamServiceUrl, nil)
+ if err != nil {
+ return false, util.NewInternalServerError(err, "Failed to create a KFAM http request.")
+ }
+ q := req.URL.Query()
+ q.Add("user", userIdentity)
+ req.URL.RawQuery = q.Encode()
+
+ var httpClient = &http.Client{Timeout: HTTP_TIMEOUT_SECONDS * time.Second}
+
+ resp, err := httpClient.Get(req.URL.String())
+ if err != nil {
+ return false, util.NewInternalServerError(err, "Failed to connect to the KFAM service.")
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return false, util.NewInternalServerError(errors.New("Requests to the KFAM service failed."), resp.Status)
+ }
+
+ jsonBindings := new(Bindings)
+ err = json.NewDecoder(resp.Body).Decode(jsonBindings)
+
+ if err != nil {
+ return false, util.NewInternalServerError(err, "Failed to parse KFAM response.")
+ }
+
+ nsFound := false
+ for _, jsonBinding := range jsonBindings.Bindings {
+ if jsonBinding.ReferredNamespace == namespace {
+ nsFound = true
+ break
+ }
+ }
+ return nsFound, nil
+}
+
+func NewKFAMClient(kfamServiceHost string, kfamServicePort string) *KFAMClient {
+ kfamServiceUrl := fmt.Sprintf("http://%s:%s/kfam/v1/bindings", kfamServiceHost, kfamServicePort)
+ return &KFAMClient{kfamServiceUrl}
+}
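
IsAuthorized relies on encoding/json's case-insensitive field matching, which is why lowercase keys such as "referredNamespace" in the KFAM response land in the exported ReferredNamespace field. A self-contained sketch of just the decode-and-check step; the payload is illustrative, not a real KFAM response.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type binding struct {
	ReferredNamespace string
}

type bindings struct {
	Bindings []binding
}

func main() {
	// Illustrative payload; the real one comes from GET /kfam/v1/bindings?user=<identity>.
	// encoding/json matches keys case-insensitively, so "referredNamespace" fills ReferredNamespace.
	payload := `{"bindings":[{"referredNamespace":"nsA"},{"referredNamespace":"nsB"}]}`

	var b bindings
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(&b); err != nil {
		panic(err)
	}
	for _, ns := range []string{"nsA", "nsC"} {
		found := false
		for _, bd := range b.Bindings {
			if bd.ReferredNamespace == ns {
				found = true
				break
			}
		}
		fmt.Printf("authorized for %s: %v\n", ns, found)
	}
}
```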
diff --git a/backend/src/apiserver/client/kfam_fake.go b/backend/src/apiserver/client/kfam_fake.go
new file mode 100644
index 00000000000..f7cc606f4bd
--- /dev/null
+++ b/backend/src/apiserver/client/kfam_fake.go
@@ -0,0 +1,37 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+type FakeKFAMClientAuthorized struct {
+}
+
+func NewFakeKFAMClientAuthorized() *FakeKFAMClientAuthorized {
+ return &FakeKFAMClientAuthorized{}
+}
+
+func (c *FakeKFAMClientAuthorized) IsAuthorized(userIdentity string, namespace string) (bool, error) {
+ return true, nil
+}
+
+type FakeKFAMClientUnauthorized struct {
+}
+
+func NewFakeKFAMClientUnauthorized() *FakeKFAMClientUnauthorized {
+ return &FakeKFAMClientUnauthorized{}
+}
+
+func (c *FakeKFAMClientUnauthorized) IsAuthorized(userIdentity string, namespace string) (bool, error) {
+ return false, nil
+}
\ No newline at end of file
diff --git a/backend/src/apiserver/client/kfam_test.go b/backend/src/apiserver/client/kfam_test.go
new file mode 100644
index 00000000000..8dd61b0770f
--- /dev/null
+++ b/backend/src/apiserver/client/kfam_test.go
@@ -0,0 +1,46 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestIsAuthorized(t *testing.T) {
+ expect_response := []byte(`{"bindings":[{"user": {"kind": "User","name": "userA@google.com"},"referredNamespace": "nsA","RoleRef": {"apiGroup": "","kind": "ClusterRole", "name":"edit"}},{"user": {"kind": "User","name": "userA@google.com"},"referredNamespace": "nsB","RoleRef": {"apiGroup": "","kind": "ClusterRole", "name":"admin"}}]}`)
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(200)
+ w.Write(expect_response)
+ }))
+ defer srv.Close()
+ fmt.Println(srv.URL)
+ kfam_client := NewKFAMClient("","")
+ kfam_client.kfamServiceUrl = srv.URL
+ authorized, err := kfam_client.IsAuthorized("user", "nsA")
+ assert.Nil(t, err)
+ assert.True(t, authorized)
+
+ authorized, err = kfam_client.IsAuthorized("user", "nsB")
+ assert.Nil(t, err)
+ assert.True(t, authorized)
+
+ authorized, err = kfam_client.IsAuthorized("user", "nsC")
+ assert.Nil(t, err)
+ assert.False(t, authorized)
+}
\ No newline at end of file
diff --git a/backend/src/apiserver/client/pod.go b/backend/src/apiserver/client/kubernetes_core.go
similarity index 56%
rename from backend/src/apiserver/client/pod.go
rename to backend/src/apiserver/client/kubernetes_core.go
index 637c7e9e93d..5e1282906a4 100644
--- a/backend/src/apiserver/client/pod.go
+++ b/backend/src/apiserver/client/kubernetes_core.go
@@ -10,7 +10,19 @@ import (
"time"
)
-func createPodClient(namespace string) (v1.PodInterface, error) {
+type KubernetesCoreInterface interface {
+ PodClient(namespace string) v1.PodInterface
+}
+
+type KubernetesCore struct {
+ coreV1Client v1.CoreV1Interface
+}
+
+func (c *KubernetesCore) PodClient(namespace string) v1.PodInterface {
+ return c.coreV1Client.Pods(namespace)
+}
+
+func createKubernetesCore() (KubernetesCoreInterface, error) {
restConfig, err := rest.InClusterConfig()
if err != nil {
return nil, errors.Wrap(err, "Failed to initialize kubernetes client.")
@@ -20,15 +32,15 @@ func createPodClient(namespace string) (v1.PodInterface, error) {
if err != nil {
return nil, errors.Wrap(err, "Failed to initialize kubernetes client set.")
}
- return clientSet.CoreV1().Pods(namespace), nil
+ return &KubernetesCore{clientSet.CoreV1()}, nil
}
-// CreatePodClientOrFatal creates a new client for the Kubernetes pod.
-func CreatePodClientOrFatal(namespace string, initConnectionTimeout time.Duration) v1.PodInterface{
- var client v1.PodInterface
+// CreateKubernetesCoreOrFatal creates a new Kubernetes core API client or exits the process if it cannot.
+func CreateKubernetesCoreOrFatal(initConnectionTimeout time.Duration) KubernetesCoreInterface {
+ var client KubernetesCoreInterface
var err error
var operation = func() error {
- client, err = createPodClient(namespace)
+ client, err = createKubernetesCore()
if err != nil {
return err
}
diff --git a/backend/src/apiserver/client/kubernetes_core_fake.go b/backend/src/apiserver/client/kubernetes_core_fake.go
new file mode 100644
index 00000000000..2c40849ebda
--- /dev/null
+++ b/backend/src/apiserver/client/kubernetes_core_fake.go
@@ -0,0 +1,43 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+type FakeKuberneteCoreClient struct {
+ podClientFake *FakePodClient
+}
+
+func (c *FakeKuberneteCoreClient) PodClient(namespace string) v1.PodInterface {
+ return c.podClientFake
+}
+
+func NewFakeKuberneteCoresClient() *FakeKuberneteCoreClient {
+ return &FakeKuberneteCoreClient{&FakePodClient{}}
+}
+
+type FakeKubernetesCoreClientWithBadPodClient struct {
+ podClientFake *FakeBadPodClient
+}
+
+func NewFakeKubernetesCoreClientWithBadPodClient() *FakeKubernetesCoreClientWithBadPodClient {
+ return &FakeKubernetesCoreClientWithBadPodClient{&FakeBadPodClient{}}
+}
+
+func (c *FakeKubernetesCoreClientWithBadPodClient) PodClient(namespace string) v1.PodInterface {
+ return c.podClientFake
+}
diff --git a/backend/src/apiserver/resource/pod_fake.go b/backend/src/apiserver/client/pod_fake.go
similarity index 99%
rename from backend/src/apiserver/resource/pod_fake.go
rename to backend/src/apiserver/client/pod_fake.go
index cfe89a3e08a..68b87c3a938 100644
--- a/backend/src/apiserver/resource/pod_fake.go
+++ b/backend/src/apiserver/client/pod_fake.go
@@ -1,4 +1,4 @@
-package resource
+package client
import (
"errors"
diff --git a/backend/src/apiserver/client/sql.go b/backend/src/apiserver/client/sql.go
index a7a93bce7bf..b973d8dc8db 100644
--- a/backend/src/apiserver/client/sql.go
+++ b/backend/src/apiserver/client/sql.go
@@ -21,18 +21,25 @@ import (
)
func CreateMySQLConfig(user, password string, mysqlServiceHost string,
- mysqlServicePort string, dbName string, mysqlGroupConcatMaxLen string) *mysql.Config {
+ mysqlServicePort string, dbName string, mysqlGroupConcatMaxLen string, mysqlExtraParams map[string]string) *mysql.Config {
+
+ params := map[string]string{
+ "charset": "utf8",
+ "parseTime": "True",
+ "loc": "Local",
+ "group_concat_max_len": mysqlGroupConcatMaxLen,
+ }
+
+ for k, v := range mysqlExtraParams {
+ params[k] = v
+ }
+
return &mysql.Config{
User: user,
Passwd: password,
Net: "tcp",
Addr: fmt.Sprintf("%s:%s", mysqlServiceHost, mysqlServicePort),
- Params: map[string]string{
- "charset": "utf8",
- "parseTime": "True",
- "loc": "Local",
- "group_concat_max_len": mysqlGroupConcatMaxLen,
- },
+ Params: params,
DBName: dbName,
AllowNativePasswords: true,
}
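
The new mysqlExtraParams argument is merged in after the defaults, so an extra driver parameter such as tls can be added (or can override a default on a key collision). A hedged sketch that builds such a config and prints the resulting DSN; the address and database name are placeholders, and the only dependency is the go-sql-driver/mysql package this file already uses.

```go
package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Default driver parameters, as in CreateMySQLConfig.
	params := map[string]string{
		"charset":              "utf8",
		"parseTime":            "True",
		"loc":                  "Local",
		"group_concat_max_len": "1024",
	}
	// Extra parameters (DBConfig.ExtraParams) are merged in last,
	// so they also win on a key collision.
	for k, v := range map[string]string{"tls": "true"} {
		params[k] = v
	}
	cfg := mysql.Config{
		User:                 "root",
		Net:                  "tcp",
		Addr:                 "mysql:3306",
		DBName:               "mlpipeline",
		Params:               params,
		AllowNativePasswords: true,
	}
	fmt.Println(cfg.FormatDSN()) // DSN now carries tls=true alongside the defaults.
}
```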
diff --git a/backend/src/apiserver/client/sql_test.go b/backend/src/apiserver/client/sql_test.go
new file mode 100644
index 00000000000..81d19cc8b5d
--- /dev/null
+++ b/backend/src/apiserver/client/sql_test.go
@@ -0,0 +1,81 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/go-sql-driver/mysql"
+)
+
+func TestCreateMySQLConfig(t *testing.T) {
+ type args struct {
+ user string
+ password string
+ host string
+ port string
+ dbName string
+ mysqlGroupConcatMaxLen string
+ mysqlExtraParams map[string]string
+ }
+ tests := []struct {
+ name string
+ args args
+ want *mysql.Config
+ }{
+ {
+ name: "default config",
+ args: args{
+ user: "root",
+ host: "mysql",
+ port: "3306",
+ mysqlGroupConcatMaxLen: "1024",
+ mysqlExtraParams: nil,
+ },
+ want: &mysql.Config{
+ User: "root",
+ Net: "tcp",
+ Addr: "mysql:3306",
+ Params: map[string]string{"charset": "utf8", "parseTime": "True", "loc": "Local", "group_concat_max_len": "1024"},
+ AllowNativePasswords: true,
+ },
+ },
+ {
+ name: "extra parameters",
+ args: args{
+ user: "root",
+ host: "mysql",
+ port: "3306",
+ mysqlGroupConcatMaxLen: "1024",
+ mysqlExtraParams: map[string]string{"tls": "true"},
+ },
+ want: &mysql.Config{
+ User: "root",
+ Net: "tcp",
+ Addr: "mysql:3306",
+ Params: map[string]string{"charset": "utf8", "parseTime": "True", "loc": "Local", "group_concat_max_len": "1024", "tls": "true"},
+ AllowNativePasswords: true,
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := CreateMySQLConfig(tt.args.user, tt.args.password, tt.args.host, tt.args.port, tt.args.dbName, tt.args.mysqlGroupConcatMaxLen, tt.args.mysqlExtraParams); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("CreateMySQLConfig() = %#v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/backend/src/apiserver/client/workflow.go b/backend/src/apiserver/client/workflow.go
deleted file mode 100644
index 6ff692eb888..00000000000
--- a/backend/src/apiserver/client/workflow.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "time"
-
- argoclient "github.com/argoproj/argo/pkg/client/clientset/versioned"
- "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
- "github.com/cenkalti/backoff"
- "github.com/golang/glog"
- "github.com/pkg/errors"
- "k8s.io/client-go/rest"
-)
-
-func CreateWorkflowClient(namespace string) (v1alpha1.WorkflowInterface, error) {
- restConfig, err := rest.InClusterConfig()
- if err != nil {
- return nil, errors.Wrap(err, "Failed to initialize workflow client.")
- }
- wfClientSet := argoclient.NewForConfigOrDie(restConfig)
- wfClient := wfClientSet.ArgoprojV1alpha1().Workflows(namespace)
- return wfClient, nil
-}
-
-// creates a new client for the Kubernetes Workflow CRD.
-func CreateWorkflowClientOrFatal(namespace string, initConnectionTimeout time.Duration) v1alpha1.WorkflowInterface {
- var wfClient v1alpha1.WorkflowInterface
- var err error
- var operation = func() error {
- wfClient, err = CreateWorkflowClient(namespace)
- if err != nil {
- return err
- }
- return nil
- }
- b := backoff.NewExponentialBackOff()
- b.MaxElapsedTime = initConnectionTimeout
- err = backoff.Retry(operation, b)
-
- if err != nil {
- glog.Fatalf("Failed to create workflow client. Error: %v", err)
- }
- return wfClient
-}
diff --git a/backend/src/apiserver/resource/workflow_fake.go b/backend/src/apiserver/client/workflow_fake.go
similarity index 83%
rename from backend/src/apiserver/resource/workflow_fake.go
rename to backend/src/apiserver/client/workflow_fake.go
index 257a5bcc546..25992bef655 100644
--- a/backend/src/apiserver/resource/workflow_fake.go
+++ b/backend/src/apiserver/client/workflow_fake.go
@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package resource
+package client
import (
"encoding/json"
- "github.com/kubeflow/pipelines/backend/src/common/util"
"strconv"
+ "github.com/kubeflow/pipelines/backend/src/common/util"
+
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/golang/glog"
"github.com/pkg/errors"
@@ -49,18 +50,6 @@ func (c *FakeWorkflowClient) Create(workflow *v1alpha1.Workflow) (*v1alpha1.Work
return workflow, nil
}
-func (c *FakeWorkflowClient) GetWorkflowCount() int {
- return len(c.workflows)
-}
-
-func (c *FakeWorkflowClient) GetWorkflowKeys() map[string]bool {
- result := map[string]bool{}
- for key := range c.workflows {
- result[key] = true
- }
- return result
-}
-
func (c *FakeWorkflowClient) Get(name string, options v1.GetOptions) (*v1alpha1.Workflow, error) {
workflow, ok := c.workflows[name]
if ok {
@@ -88,13 +77,13 @@ func (c *FakeWorkflowClient) Delete(name string, options *v1.DeleteOptions) erro
}
func (c *FakeWorkflowClient) DeleteCollection(options *v1.DeleteOptions,
- listOptions v1.ListOptions) error {
+ listOptions v1.ListOptions) error {
glog.Error("This fake method is not yet implemented.")
return nil
}
func (c *FakeWorkflowClient) Patch(name string, pt types.PatchType, data []byte,
- subresources ...string) (*v1alpha1.Workflow, error) {
+ subresources ...string) (*v1alpha1.Workflow, error) {
var dat map[string]interface{}
json.Unmarshal(data, &dat)
@@ -129,20 +118,6 @@ func (c *FakeWorkflowClient) Patch(name string, pt types.PatchType, data []byte,
return nil, errors.New("Failed to patch workflow")
}
-func (c *FakeWorkflowClient) isTerminated(name string) (bool, error) {
- workflow, ok := c.workflows[name]
- if !ok {
- return false, errors.New("No workflow found with name: " + name)
- }
-
- activeDeadlineSeconds := workflow.Spec.ActiveDeadlineSeconds
- if activeDeadlineSeconds == nil {
- return false, errors.New("No ActiveDeadlineSeconds found in workflow with name: " + name)
- }
-
- return *activeDeadlineSeconds == 0, nil
-}
-
type FakeBadWorkflowClient struct {
FakeWorkflowClient
}
diff --git a/backend/src/apiserver/client_manager.go b/backend/src/apiserver/client_manager.go
index 179e9e728a5..4896476a90f 100644
--- a/backend/src/apiserver/client_manager.go
+++ b/backend/src/apiserver/client_manager.go
@@ -17,14 +17,12 @@ package main
import (
"database/sql"
"fmt"
- "github.com/kubeflow/pipelines/backend/src/apiserver/common"
- v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"os"
"time"
- workflowclient "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
"github.com/cenkalti/backoff"
"github.com/golang/glog"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
@@ -37,19 +35,21 @@ import (
)
const (
- minioServiceHost = "MINIO_SERVICE_SERVICE_HOST"
- minioServicePort = "MINIO_SERVICE_SERVICE_PORT"
- mysqlServiceHost = "DBConfig.Host"
- mysqlServicePort = "DBConfig.Port"
- mysqlUser = "DBConfig.User"
- mysqlPassword = "DBConfig.Password"
- mysqlDBName = "DBConfig.DBName"
- mysqlGroupConcatMaxLen = "DBConfig.GroupConcatMaxLen"
+ minioServiceHost = "MINIO_SERVICE_SERVICE_HOST"
+ minioServicePort = "MINIO_SERVICE_SERVICE_PORT"
+ mysqlServiceHost = "DBConfig.Host"
+ mysqlServicePort = "DBConfig.Port"
+ mysqlUser = "DBConfig.User"
+ mysqlPassword = "DBConfig.Password"
+ mysqlDBName = "DBConfig.DBName"
+ mysqlGroupConcatMaxLen = "DBConfig.GroupConcatMaxLen"
+ kfamServiceHost = "PROFILES_KFAM_SERVICE_HOST"
+ kfamServicePort = "PROFILES_KFAM_SERVICE_PORT"
+ mysqlExtraParams = "DBConfig.ExtraParams"
visualizationServiceHost = "ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST"
visualizationServicePort = "ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT"
- podNamespace = "POD_NAMESPACE"
initConnectionTimeout = "InitConnectionTimeout"
)
@@ -64,9 +64,10 @@ type ClientManager struct {
dBStatusStore storage.DBStatusStoreInterface
defaultExperimentStore storage.DefaultExperimentStoreInterface
objectStore storage.ObjectStoreInterface
- wfClient workflowclient.WorkflowInterface
+ argoClient client.ArgoClientInterface
swfClient scheduledworkflowclient.ScheduledWorkflowInterface
- podClient v1.PodInterface
+ k8sCoreClient client.KubernetesCoreInterface
+ kfamClient client.KFAMClientInterface
time util.TimeInterface
uuid util.UUIDGeneratorInterface
}
@@ -103,16 +104,20 @@ func (c *ClientManager) ObjectStore() storage.ObjectStoreInterface {
return c.objectStore
}
-func (c *ClientManager) Workflow() workflowclient.WorkflowInterface {
- return c.wfClient
+func (c *ClientManager) ArgoClient() client.ArgoClientInterface {
+ return c.argoClient
}
func (c *ClientManager) ScheduledWorkflow() scheduledworkflowclient.ScheduledWorkflowInterface {
return c.swfClient
}
-func (c *ClientManager) PodClient() v1.PodInterface {
- return c.podClient
+func (c *ClientManager) KubernetesCoreClient() client.KubernetesCoreInterface {
+ return c.k8sCoreClient
+}
+
+func (c *ClientManager) KFAMClient() client.KFAMClientInterface {
+ return c.kfamClient
}
func (c *ClientManager) Time() util.TimeInterface {
@@ -142,18 +147,19 @@ func (c *ClientManager) init() {
c.defaultExperimentStore = storage.NewDefaultExperimentStore(db)
c.objectStore = initMinioClient(common.GetDurationConfig(initConnectionTimeout))
- c.wfClient = client.CreateWorkflowClientOrFatal(
- common.GetStringConfig(podNamespace), common.GetDurationConfig(initConnectionTimeout))
+ c.argoClient = client.NewArgoClientOrFatal(common.GetDurationConfig(initConnectionTimeout))
c.swfClient = client.CreateScheduledWorkflowClientOrFatal(
- common.GetStringConfig(podNamespace), common.GetDurationConfig(initConnectionTimeout))
+ common.GetStringConfig(client.PodNamespace), common.GetDurationConfig(initConnectionTimeout))
- c.podClient = client.CreatePodClientOrFatal(
- common.GetStringConfig(podNamespace), common.GetDurationConfig(initConnectionTimeout))
+ c.k8sCoreClient = client.CreateKubernetesCoreOrFatal(common.GetDurationConfig(initConnectionTimeout))
runStore := storage.NewRunStore(db, c.time)
c.runStore = runStore
+ if common.IsMultiUserMode() {
+ c.kfamClient = client.NewKFAMClient(common.GetStringConfig(kfamServiceHost), common.GetStringConfig(kfamServicePort))
+ }
glog.Infof("Client manager initialized successfully")
}
@@ -232,6 +238,16 @@ func initDBClient(initConnectionTimeout time.Duration) *storage.DB {
glog.Fatalf("Failed to update pipeline description type. Error: %s", response.Error)
}
+ // If the old unique index idx_pipeline_version_uuid_name on pipeline_versions exists, remove it.
+ rows, err := db.Raw(`show index from pipeline_versions where Key_name="idx_pipeline_version_uuid_name"`).Rows()
+ if err != nil {
+ glog.Fatalf("Failed to query pipeline_version table's indices. Error: %s", err)
+ }
+ if rows.Next() {
+ db.Exec(`drop index idx_pipeline_version_uuid_name on pipeline_versions`)
+ }
+ rows.Close()
+
return storage.NewDB(db.DB(), storage.NewMySQLDialect())
}
@@ -245,6 +261,7 @@ func initMysql(driverName string, initConnectionTimeout time.Duration) string {
common.GetStringConfigWithDefault(mysqlServicePort, "3306"),
"",
common.GetStringConfigWithDefault(mysqlGroupConcatMaxLen, "1024"),
+ common.GetMapConfig(mysqlExtraParams),
)
var db *sql.DB
@@ -278,6 +295,11 @@ func initMysql(driverName string, initConnectionTimeout time.Duration) string {
util.TerminateIfError(err)
mysqlConfig.DBName = dbName
+ // When updating, return the number of rows matched instead of rows affected, so rows whose
+ // values are unchanged by the update are still counted. If an update by primary key matches
+ // zero rows, the row does not exist.
+ // Config reference: https://github.com/go-sql-driver/mysql#clientfoundrows
+ mysqlConfig.ClientFoundRows = true
return mysqlConfig.FormatDSN()
}
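
To illustrate why ClientFoundRows matters: with it enabled, RowsAffected reports rows matched, so an UPDATE by primary key that returns 0 can be read as "row not found" even when the new values equal the old ones. A hedged sketch under that assumption; the DSN, table, and column names below are placeholders, not code from this PR.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// clientFoundRows=true in the DSN corresponds to mysqlConfig.ClientFoundRows = true.
	db, err := sql.Open("mysql", "root@tcp(mysql:3306)/mlpipeline?clientFoundRows=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Hypothetical update by primary key; table and column names are illustrative.
	res, err := db.Exec(`UPDATE run_details SET Conditions = ? WHERE UUID = ?`, "Succeeded", "some-run-uuid")
	if err != nil {
		log.Fatal(err)
	}
	matched, err := res.RowsAffected()
	if err != nil {
		log.Fatal(err)
	}
	if matched == 0 {
		log.Println("run some-run-uuid not found")
	}
}
```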
diff --git a/backend/src/apiserver/common/BUILD.bazel b/backend/src/apiserver/common/BUILD.bazel
index 5f1b7a81546..282a1f65264 100644
--- a/backend/src/apiserver/common/BUILD.bazel
+++ b/backend/src/apiserver/common/BUILD.bazel
@@ -1,4 +1,4 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@@ -8,6 +8,7 @@ go_library(
"filter_context.go",
"pagination_context.go",
"paths.go",
+ "util.go",
],
importpath = "github.com/kubeflow/pipelines/backend/src/apiserver/common",
visibility = ["//visibility:public"],
@@ -18,3 +19,13 @@ go_library(
"@com_github_spf13_viper//:go_default_library",
],
)
+
+go_test(
+ name = "go_default_test",
+ srcs = ["util_test.go"],
+ embed = [":go_default_library"],
+ deps = [
+ "//backend/api:go_default_library",
+ "@com_github_stretchr_testify//assert:go_default_library",
+ ],
+)
diff --git a/backend/src/apiserver/common/config.go b/backend/src/apiserver/common/config.go
index a6bec9a4ebc..5e7b79d259b 100644
--- a/backend/src/apiserver/common/config.go
+++ b/backend/src/apiserver/common/config.go
@@ -22,6 +22,9 @@ import (
"github.com/spf13/viper"
)
+const (
+ MultiUserMode string = "MULTIUSER"
+)
func GetStringConfig(configName string) string {
if !viper.IsSet(configName) {
@@ -37,6 +40,14 @@ func GetStringConfigWithDefault(configName, value string) string {
return viper.GetString(configName)
}
+func GetMapConfig(configName string) map[string]string {
+ if !viper.IsSet(configName) {
+ glog.Infof("Config %s not specified, skipping", configName)
+ return nil
+ }
+ return viper.GetStringMapString(configName)
+}
+
func GetBoolConfigWithDefault(configName string, value bool) bool {
if !viper.IsSet(configName) {
return value
@@ -54,3 +65,7 @@ func GetDurationConfig(configName string) time.Duration {
}
return viper.GetDuration(configName)
}
+
+func IsMultiUserMode() bool {
+ return GetBoolConfigWithDefault(MultiUserMode, false)
+}
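
GetMapConfig is a thin wrapper around Viper's GetStringMapString, and IsMultiUserMode reads the MULTIUSER flag with a false default. A small sketch of the Viper behaviour these helpers rely on; the values are set programmatically here only to simulate what a config file entry like "DBConfig": {"ExtraParams": {"tls": "true"}} would provide.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Simulated config values; in the API server they come from the config file / env.
	viper.Set("DBConfig.ExtraParams", map[string]string{"tls": "true"})
	viper.Set("MULTIUSER", true)

	fmt.Println(viper.GetStringMapString("DBConfig.ExtraParams")) // map[tls:true]
	fmt.Println(viper.GetBool("MULTIUSER"))                       // true
}
```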
diff --git a/backend/src/apiserver/common/const.go b/backend/src/apiserver/common/const.go
index 5a27fa7ed75..08d8db3bf32 100644
--- a/backend/src/apiserver/common/const.go
+++ b/backend/src/apiserver/common/const.go
@@ -28,6 +28,7 @@ const (
Run ResourceType = "Run"
Pipeline ResourceType = "pipeline"
PipelineVersion ResourceType = "PipelineVersion"
+ Namespace ResourceType = "Namespace"
)
const (
@@ -35,6 +36,10 @@ const (
Creator Relationship = "Creator"
)
+const (
+ GoogleIAPUserIdentityHeader string = "x-goog-authenticated-user-email"
+)
+
func ToModelResourceType(apiType api.ResourceType) (ResourceType, error) {
switch apiType {
case api.ResourceType_EXPERIMENT:
@@ -43,6 +48,8 @@ func ToModelResourceType(apiType api.ResourceType) (ResourceType, error) {
return Job, nil
case api.ResourceType_PIPELINE_VERSION:
return PipelineVersion, nil
+ case api.ResourceType_NAMESPACE:
+ return Namespace, nil
default:
return "", util.NewInvalidInputError("Unsupported resource type: %s", api.ResourceType_name[int32(apiType)])
}
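
The GoogleIAPUserIdentityHeader constant names the header that Cloud IAP attaches to proxied requests; its value typically has the form "<issuer>:<email>", e.g. "accounts.google.com:alice@example.com". This PR only forwards the raw header, so the prefix-stripping below is a hypothetical illustration of the value's shape rather than code from the change.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up example of an x-goog-authenticated-user-email value.
	headerValue := "accounts.google.com:alice@example.com"

	email := headerValue
	if idx := strings.LastIndex(headerValue, ":"); idx >= 0 {
		email = headerValue[idx+1:]
	}
	fmt.Println(email) // alice@example.com
}
```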
diff --git a/backend/src/apiserver/common/util.go b/backend/src/apiserver/common/util.go
new file mode 100644
index 00000000000..eedd0d6f1d7
--- /dev/null
+++ b/backend/src/apiserver/common/util.go
@@ -0,0 +1,30 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ api "github.com/kubeflow/pipelines/backend/api/go_client"
+)
+
+func GetNamespaceFromAPIResourceReferences(resourceRefs []*api.ResourceReference) string {
+ namespace := ""
+ for _, resourceRef := range resourceRefs {
+ if resourceRef.Key.Type == api.ResourceType_NAMESPACE {
+ namespace = resourceRef.Key.Id
+ break
+ }
+ }
+ return namespace
+}
diff --git a/backend/src/apiserver/common/util_test.go b/backend/src/apiserver/common/util_test.go
new file mode 100644
index 00000000000..5ddca65951f
--- /dev/null
+++ b/backend/src/apiserver/common/util_test.go
@@ -0,0 +1,49 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "testing"
+
+ api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetNamespaceFromResourceReferences(t *testing.T) {
+ references := []*api.ResourceReference{
+ {
+ Key: &api.ResourceKey{
+ Type: api.ResourceType_EXPERIMENT, Id: "123"},
+ Relationship: api.Relationship_CREATOR,
+ },
+ {
+ Key: &api.ResourceKey{
+ Type: api.ResourceType_NAMESPACE, Id: "ns"},
+ Relationship: api.Relationship_OWNER,
+ },
+ }
+ namespace := GetNamespaceFromAPIResourceReferences(references)
+ assert.Equal(t, "ns", namespace)
+
+ references = []*api.ResourceReference{
+ {
+ Key: &api.ResourceKey{
+ Type: api.ResourceType_EXPERIMENT, Id: "123"},
+ Relationship: api.Relationship_CREATOR,
+ },
+ }
+ namespace = GetNamespaceFromAPIResourceReferences(references)
+ assert.Equal(t, "", namespace)
+}
diff --git a/backend/src/apiserver/config/sample_config.json b/backend/src/apiserver/config/sample_config.json
index 57077ccff8b..c2b1ebd1207 100644
--- a/backend/src/apiserver/config/sample_config.json
+++ b/backend/src/apiserver/config/sample_config.json
@@ -1,32 +1,32 @@
[
{
"name": "[Sample] ML - XGBoost - Training with Confusion Matrix",
- "description": "[GCP Permission requirements](https://github.com/kubeflow/pipelines/tree/master/samples/core/xgboost_training_cm#requirements). A trainer that does end-to-end distributed training for XGBoost models. [source code](https://github.com/kubeflow/pipelines/tree/master/samples/core/xgboost_training_cm)",
- "file": "/samples/core/xgboost_training_cm/xgboost_training_cm.py.tar.gz"
+ "description": "[GCP Permission requirements](https://github.com/kubeflow/pipelines/blob/master/samples/core/xgboost_training_cm#requirements). [source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/xgboost_training_cm). A trainer that does end-to-end distributed training for XGBoost models.",
+ "file": "/samples/core/xgboost_training_cm/xgboost_training_cm.py.yaml"
},
{
"name": "[Sample] Unified DSL - Taxi Tip Prediction Model Trainer",
- "description": "[GCP Permission requirements](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/parameterized_tfx_oss#permission). Example pipeline that does classification with model analysis based on a public tax cab BigQuery dataset. [source code](https://github.com/kubeflow/pipelines/tree/master/samples/contrib/parameterized_tfx_oss)",
- "file": "/samples/contrib/parameterized_tfx_oss/parameterized_tfx_oss.tar.gz"
+ "description": "[GCP Permission requirements](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/parameterized_tfx_oss#permission). [source code](https://console.cloud.google.com/mlengine/notebooks/deploy-notebook?q=download_url%3Dhttps%253A%252F%252Fraw.githubusercontent.com%252Fkubeflow%252Fpipelines%252F0.1.40%252Fsamples%252Fcore%252Fparameterized_tfx_oss%252Ftaxi_pipeline_notebook.ipynb). Example pipeline that does classification with model analysis based on a public tax cab BigQuery dataset.",
+ "file": "/samples/core/parameterized_tfx_oss/parameterized_tfx_oss.py.yaml"
},
{
"name": "[Sample] Basic - Sequential execution",
- "description": "A pipeline with two sequential steps. [source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/sequential/sequential.py)",
- "file": "/samples/core/sequential/sequential.py.tar.gz"
+ "description": "[source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/sequential/sequential.py) A pipeline with two sequential steps.",
+ "file": "/samples/core/sequential/sequential.py.yaml"
},
{
"name": "[Sample] Basic - Parallel execution",
- "description": "A pipeline that downloads two messages in parallel and prints the concatenated result. [source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/parallel_join/parallel_join.py)",
- "file": "/samples/core/parallel_join/parallel_join.py.tar.gz"
+ "description": "[source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/parallel_join/parallel_join.py) A pipeline that downloads two messages in parallel and prints the concatenated result.",
+ "file": "/samples/core/parallel_join/parallel_join.py.yaml"
},
{
"name": "[Sample] Basic - Conditional execution",
- "description": "A pipeline shows how to use dsl.Condition. [source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/condition/condition.py)",
- "file": "/samples/core/condition/condition.py.tar.gz"
+ "description": "[source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/condition/condition.py) A pipeline shows how to use dsl.Condition.",
+ "file": "/samples/core/condition/condition.py.yaml"
},
{
"name": "[Sample] Basic - Exit Handler",
- "description": "A pipeline that downloads a message and prints it out. Exit Handler will run at the end. [source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/exit_handler/exit_handler.py)",
- "file": "/samples/core/exit_handler/exit_handler.py.tar.gz"
+ "description": "[source code](https://github.com/kubeflow/pipelines/blob/master/samples/core/exit_handler/exit_handler.py) A pipeline that downloads a message and prints it out. Exit Handler will run at the end.",
+ "file": "/samples/core/exit_handler/exit_handler.py.yaml"
}
]
diff --git a/backend/src/apiserver/main.go b/backend/src/apiserver/main.go
index 4c8f1b2f212..2af96fec3ca 100644
--- a/backend/src/apiserver/main.go
+++ b/backend/src/apiserver/main.go
@@ -71,6 +71,17 @@ func main() {
clientManager.Close()
}
+// A custom http request header matcher to pass on the user identity
+// Reference: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/docs/_docs/customizingyourgateway.md#mapping-from-http-request-headers-to-grpc-client-metadata
+func grpcCustomMatcher(key string) (string, bool) {
+ switch strings.ToLower(key) {
+ case common.GoogleIAPUserIdentityHeader:
+ return strings.ToLower(key), true
+ default:
+ return strings.ToLower(key), false
+ }
+}
+
func startRpcServer(resourceManager *resource.ResourceManager) {
glog.Info("Starting RPC server")
listener, err := net.Listen("tcp", *rpcPortFlag)
@@ -107,7 +118,7 @@ func startHttpProxy(resourceManager *resource.ResourceManager) {
defer cancel()
// Create gRPC HTTP MUX and register services.
- mux := runtime.NewServeMux()
+ mux := runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(grpcCustomMatcher))
registerHttpHandlerFromEndpoint(api.RegisterPipelineServiceHandlerFromEndpoint, "PipelineService", ctx, mux)
registerHttpHandlerFromEndpoint(api.RegisterExperimentServiceHandlerFromEndpoint, "ExperimentService", ctx, mux)
registerHttpHandlerFromEndpoint(api.RegisterJobServiceHandlerFromEndpoint, "JobService", ctx, mux)
@@ -219,4 +230,4 @@ func initConfig() {
// Read in config again
viper.ReadInConfig()
})
-}
\ No newline at end of file
+}
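
The gateway now uses grpcCustomMatcher to decide, per incoming HTTP header, whether to forward it as gRPC metadata; only the IAP identity header passes, matched case-insensitively. A standalone copy of that logic, purely to show the behaviour for a couple of sample headers.

```go
package main

import (
	"fmt"
	"strings"
)

// matcher reproduces grpcCustomMatcher: forward only the IAP identity
// header, under its lowercased name.
func matcher(key string) (string, bool) {
	switch strings.ToLower(key) {
	case "x-goog-authenticated-user-email":
		return strings.ToLower(key), true
	default:
		return strings.ToLower(key), false
	}
}

func main() {
	for _, h := range []string{"X-Goog-Authenticated-User-Email", "Cookie"} {
		name, forward := matcher(h)
		fmt.Printf("%s -> forward=%v as %q\n", h, forward, name)
	}
}
```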
diff --git a/backend/src/apiserver/model/BUILD.bazel b/backend/src/apiserver/model/BUILD.bazel
index 366879d5758..9e90fadcee5 100644
--- a/backend/src/apiserver/model/BUILD.bazel
+++ b/backend/src/apiserver/model/BUILD.bazel
@@ -21,12 +21,16 @@ go_library(
go_test(
name = "go_default_test",
- srcs = ["pipeline_version_test.go"],
+ srcs = [
+ "pipeline_version_test.go",
+ "resource_reference_test.go",
+ ],
embed = [":go_default_library"],
importpath = "github.com/kubeflow/pipelines/backend/src/apiserver/model",
visibility = ["//visibility:public"],
deps = [
"//backend/api:go_default_library",
+ "//backend/src/apiserver/common:go_default_library",
"//backend/src/apiserver/list:go_default_library",
"@com_github_masterminds_squirrel//:go_default_library",
"@com_github_stretchr_testify//assert:go_default_library",
diff --git a/backend/src/apiserver/model/pipeline_version.go b/backend/src/apiserver/model/pipeline_version.go
index 8ff5137badf..1cdb55a9197 100644
--- a/backend/src/apiserver/model/pipeline_version.go
+++ b/backend/src/apiserver/model/pipeline_version.go
@@ -31,14 +31,14 @@ const (
type PipelineVersion struct {
UUID string `gorm:"column:UUID; not null; primary_key"`
CreatedAtInSec int64 `gorm:"column:CreatedAtInSec; not null; index"`
- Name string `gorm:"column:Name; not null; unique_index:idx_pipeline_version_uuid_name"`
+ Name string `gorm:"column:Name; not null; unique_index:idx_pipelineid_name"`
// Set size to 65535 so it will be stored as longtext.
// https://dev.mysql.com/doc/refman/8.0/en/column-count-limit.html
Parameters string `gorm:"column:Parameters; not null; size:65535"`
// PipelineVersion belongs to Pipeline. If a pipeline with a specific UUID
// is deleted from Pipeline table, all this pipeline's versions will be
// deleted from PipelineVersion table.
- PipelineId string `gorm:"column:PipelineId; not null;index;"`
+ PipelineId string `gorm:"column:PipelineId; not null;index; unique_index:idx_pipelineid_name"`
Status PipelineVersionStatus `gorm:"column:Status; not null"`
// Code source url links to the pipeline version's definition in repo.
CodeSourceUrl string `gorm:"column:CodeSourceUrl;"`
diff --git a/backend/src/apiserver/model/resource_reference.go b/backend/src/apiserver/model/resource_reference.go
index 6336e43571d..ca1331be0fa 100644
--- a/backend/src/apiserver/model/resource_reference.go
+++ b/backend/src/apiserver/model/resource_reference.go
@@ -14,7 +14,9 @@
package model
-import "github.com/kubeflow/pipelines/backend/src/apiserver/common"
+import (
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
+)
// Resource reference table models the relationship between resources in a loosely coupled way.
type ResourceReference struct {
@@ -39,3 +41,14 @@ type ResourceReference struct {
// The json formatted blob of the resource reference.
Payload string `gorm:"column:Payload; not null; size:65535 "`
}
+
+func GetNamespaceFromModelResourceReferences(resourceRefs []*ResourceReference) string {
+ namespace := ""
+ for _, resourceRef := range resourceRefs {
+ if resourceRef.ReferenceType == common.Namespace {
+ namespace = resourceRef.ReferenceUUID
+ break
+ }
+ }
+ return namespace
+}
diff --git a/backend/src/apiserver/model/resource_reference_test.go b/backend/src/apiserver/model/resource_reference_test.go
new file mode 100644
index 00000000000..ff58545064e
--- /dev/null
+++ b/backend/src/apiserver/model/resource_reference_test.go
@@ -0,0 +1,51 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestGetNamespaceFromResourceReferencesModel(t *testing.T) {
+ references := []*ResourceReference{
+ {
+ ReferenceType: common.Experiment,
+ ReferenceUUID: "123",
+ ReferenceName: "123",
+ Relationship: common.Creator,
+ },
+ {
+ ReferenceType: common.Namespace,
+ ReferenceName: "ns",
+ ReferenceUUID: "ns",
+ Relationship: common.Creator,
+ },
+ }
+ namespace := GetNamespaceFromModelResourceReferences(references)
+ assert.Equal(t, "ns", namespace)
+
+ references = []*ResourceReference{
+ {
+ ReferenceType: common.Experiment,
+ ReferenceUUID: "123",
+ ReferenceName: "123",
+ Relationship: common.Creator,
+ },
+ }
+ namespace = GetNamespaceFromModelResourceReferences(references)
+ assert.Equal(t, "", namespace)
+}
diff --git a/backend/src/apiserver/resource/BUILD.bazel b/backend/src/apiserver/resource/BUILD.bazel
index c477c49e6aa..5cbe4a8a136 100644
--- a/backend/src/apiserver/resource/BUILD.bazel
+++ b/backend/src/apiserver/resource/BUILD.bazel
@@ -5,16 +5,15 @@ go_library(
srcs = [
"client_manager_fake.go",
"model_converter.go",
- "pod_fake.go",
"resource_manager.go",
"resource_manager_util.go",
"scheduled_workflow_fake.go",
- "workflow_fake.go",
],
importpath = "github.com/kubeflow/pipelines/backend/src/apiserver/resource",
visibility = ["//visibility:public"],
deps = [
"//backend/api:go_default_library",
+ "//backend/src/apiserver/client:go_default_library",
"//backend/src/apiserver/common:go_default_library",
"//backend/src/apiserver/list:go_default_library",
"//backend/src/apiserver/model:go_default_library",
@@ -49,6 +48,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//backend/api:go_default_library",
+ "//backend/src/apiserver/client:go_default_library",
"//backend/src/apiserver/common:go_default_library",
"//backend/src/apiserver/model:go_default_library",
"//backend/src/apiserver/storage:go_default_library",
diff --git a/backend/src/apiserver/resource/client_manager_fake.go b/backend/src/apiserver/resource/client_manager_fake.go
index 2cbea55c72e..47d42dd52fe 100644
--- a/backend/src/apiserver/resource/client_manager_fake.go
+++ b/backend/src/apiserver/resource/client_manager_fake.go
@@ -15,12 +15,11 @@
package resource
import (
- workflowclient "github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
"github.com/golang/glog"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/client"
"github.com/kubeflow/pipelines/backend/src/apiserver/storage"
"github.com/kubeflow/pipelines/backend/src/common/util"
scheduledworkflowclient "github.com/kubeflow/pipelines/backend/src/crd/pkg/client/clientset/versioned/typed/scheduledworkflow/v1beta1"
- v1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
@@ -38,9 +37,10 @@ type FakeClientManager struct {
dBStatusStore storage.DBStatusStoreInterface
defaultExperimentStore storage.DefaultExperimentStoreInterface
objectStore storage.ObjectStoreInterface
- workflowClientFake *FakeWorkflowClient
+ ArgoClientFake *client.FakeArgoClient
scheduledWorkflowClientFake *FakeScheduledWorkflowClient
- podClientFake v1.PodInterface
+ k8sCoreClientFake *client.FakeKuberneteCoreClient
+ KfamClientFake client.KFAMClientInterface
time util.TimeInterface
uuid util.UUIDGeneratorInterface
}
@@ -69,13 +69,14 @@ func NewFakeClientManager(time util.TimeInterface, uuid util.UUIDGeneratorInterf
pipelineStore: storage.NewPipelineStore(db, time, uuid),
jobStore: storage.NewJobStore(db, time),
runStore: storage.NewRunStore(db, time),
- workflowClientFake: NewWorkflowClientFake(),
+ ArgoClientFake: client.NewFakeArgoClient(),
resourceReferenceStore: storage.NewResourceReferenceStore(db),
dBStatusStore: storage.NewDBStatusStore(db),
defaultExperimentStore: storage.NewDefaultExperimentStore(db),
objectStore: storage.NewFakeObjectStore(),
scheduledWorkflowClientFake: NewScheduledWorkflowClientFake(),
- podClientFake: FakePodClient{},
+ k8sCoreClientFake: client.NewFakeKuberneteCoresClient(),
+ KfamClientFake: client.NewFakeKFAMClientAuthorized(),
time: time,
uuid: uuid,
}, nil
@@ -114,8 +115,8 @@ func (f *FakeClientManager) DB() *storage.DB {
return f.db
}
-func (f *FakeClientManager) Workflow() workflowclient.WorkflowInterface {
- return f.workflowClientFake
+func (f *FakeClientManager) ArgoClient() client.ArgoClientInterface {
+ return f.ArgoClientFake
}
func (f *FakeClientManager) JobStore() storage.JobStoreInterface {
@@ -142,8 +143,12 @@ func (f *FakeClientManager) ScheduledWorkflow() scheduledworkflowclient.Schedule
return f.scheduledWorkflowClientFake
}
-func (f *FakeClientManager) PodClient() v1.PodInterface {
- return f.podClientFake
+func (f *FakeClientManager) KubernetesCoreClient() client.KubernetesCoreInterface {
+ return f.k8sCoreClientFake
+}
+
+func (f *FakeClientManager) KFAMClient() client.KFAMClientInterface {
+ return f.KfamClientFake
}
func (f *FakeClientManager) Close() error {
diff --git a/backend/src/apiserver/resource/model_converter.go b/backend/src/apiserver/resource/model_converter.go
index 8f52b8fccfa..43283427f14 100644
--- a/backend/src/apiserver/resource/model_converter.go
+++ b/backend/src/apiserver/resource/model_converter.go
@@ -199,6 +199,8 @@ func (r *ResourceManager) toModelResourceReferences(
if err != nil {
return nil, util.Wrap(err, "Failed to find the referred resource")
}
+
+ // TODO(gaoning777): Further investigation: is the plain namespace name a good choice, or should a UUID be stored so references stay distinct across namespace deletion/recreation?
modelRef := &model.ResourceReference{
ResourceUUID: resourceId,
ResourceType: resourceType,
@@ -244,6 +246,8 @@ func (r *ResourceManager) getResourceName(resourceType common.ResourceType, reso
return "", util.Wrap(err, "Referred pipeline version not found.")
}
return version.Name, nil
+ case common.Namespace:
+ return resourceId, nil
default:
return "", util.NewInvalidInputError("Unsupported resource type: %s", string(resourceType))
}
diff --git a/backend/src/apiserver/resource/model_converter_test.go b/backend/src/apiserver/resource/model_converter_test.go
index a607ced0636..1043a0937fb 100644
--- a/backend/src/apiserver/resource/model_converter_test.go
+++ b/backend/src/apiserver/resource/model_converter_test.go
@@ -198,6 +198,17 @@ func TestToModelResourceReferences_UnknownRefType(t *testing.T) {
assert.Contains(t, err.Error(), "Failed to convert reference type")
}
+func TestToModelResourceReferences_NamespaceRef(t *testing.T) {
+ store, manager, _ := initWithJob(t)
+ defer store.Close()
+
+ modelRefs, err := manager.toModelResourceReferences("r1", common.Run, []*api.ResourceReference{
+ {Key: &api.ResourceKey{Type: api.ResourceType_NAMESPACE, Id: "e1"}, Relationship: api.Relationship_OWNER},
+ })
+ assert.Nil(t, err)
+ assert.Equal(t, 1, len(modelRefs))
+}
+
func TestToModelResourceReferences_UnknownRelationship(t *testing.T) {
store, manager, _ := initWithJob(t)
defer store.Close()
diff --git a/backend/src/apiserver/resource/resource_manager.go b/backend/src/apiserver/resource/resource_manager.go
index b0f43f21362..48ac6682de1 100644
--- a/backend/src/apiserver/resource/resource_manager.go
+++ b/backend/src/apiserver/resource/resource_manager.go
@@ -24,6 +24,7 @@ import (
"github.com/cenkalti/backoff"
"github.com/golang/glog"
api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/client"
"github.com/kubeflow/pipelines/backend/src/apiserver/common"
"github.com/kubeflow/pipelines/backend/src/apiserver/list"
"github.com/kubeflow/pipelines/backend/src/apiserver/model"
@@ -33,7 +34,6 @@ import (
scheduledworkflowclient "github.com/kubeflow/pipelines/backend/src/crd/pkg/client/clientset/versioned/typed/scheduledworkflow/v1beta1"
"github.com/pkg/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/apimachinery/pkg/types"
)
@@ -41,6 +41,7 @@ import (
const (
defaultPipelineRunnerServiceAccountEnvVar = "DefaultPipelineRunnerServiceAccount"
defaultPipelineRunnerServiceAccount = "pipeline-runner"
+ defaultServiceAccount = "default-editor"
)
type ClientManagerInterface interface {
@@ -52,9 +53,10 @@ type ClientManagerInterface interface {
DBStatusStore() storage.DBStatusStoreInterface
DefaultExperimentStore() storage.DefaultExperimentStoreInterface
ObjectStore() storage.ObjectStoreInterface
- Workflow() workflowclient.WorkflowInterface
+ ArgoClient() client.ArgoClientInterface
ScheduledWorkflow() scheduledworkflowclient.ScheduledWorkflowInterface
- PodClient() corev1.PodInterface
+ KubernetesCoreClient() client.KubernetesCoreInterface
+ KFAMClient() client.KFAMClientInterface
Time() util.TimeInterface
UUID() util.UUIDGeneratorInterface
}
@@ -68,9 +70,10 @@ type ResourceManager struct {
dBStatusStore storage.DBStatusStoreInterface
defaultExperimentStore storage.DefaultExperimentStoreInterface
objectStore storage.ObjectStoreInterface
- workflowClient workflowclient.WorkflowInterface
+ argoClient client.ArgoClientInterface
scheduledWorkflowClient scheduledworkflowclient.ScheduledWorkflowInterface
- podClient corev1.PodInterface
+ k8sCoreClient client.KubernetesCoreInterface
+ kfamClient client.KFAMClientInterface
time util.TimeInterface
uuid util.UUIDGeneratorInterface
}
@@ -85,14 +88,19 @@ func NewResourceManager(clientManager ClientManagerInterface) *ResourceManager {
dBStatusStore: clientManager.DBStatusStore(),
defaultExperimentStore: clientManager.DefaultExperimentStore(),
objectStore: clientManager.ObjectStore(),
- workflowClient: clientManager.Workflow(),
+ argoClient: clientManager.ArgoClient(),
scheduledWorkflowClient: clientManager.ScheduledWorkflow(),
- podClient: clientManager.PodClient(),
+ k8sCoreClient: clientManager.KubernetesCoreClient(),
+ kfamClient: clientManager.KFAMClient(),
time: clientManager.Time(),
uuid: clientManager.UUID(),
}
}
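+// getWorkflowClient returns the Argo workflow client scoped to the given namespace.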
+func (r *ResourceManager) getWorkflowClient(namespace string) workflowclient.WorkflowInterface {
+ return r.argoClient.Workflow(namespace)
+}
+
func (r *ResourceManager) GetTime() util.TimeInterface {
return r.time
}
@@ -259,7 +267,20 @@ func (r *ResourceManager) CreateRun(apiRun *api.Run) (*model.RunDetail, error) {
return nil, util.Wrap(err, "Failed to verify parameters.")
}
- workflow.SetServiceAccount(r.getDefaultSA())
+ multiuserMode := common.IsMultiUserMode()
+ if multiuserMode {
+ if len(workflow.Spec.ServiceAccountName) == 0 || workflow.Spec.ServiceAccountName == defaultPipelineRunnerServiceAccount {
+ // To preserve SDK backward compatibility, the backend currently replaces the service account in multi-user mode.
+ workflow.SetServiceAccount(defaultServiceAccount)
+ }
+ } else {
+ workflow.SetServiceAccount(r.getDefaultSA())
+ }
+
+ // Disable istio sidecar injection
+ if multiuserMode {
+ workflow.SetAnnotationsToAllTemplates(util.AnnotationKeyIstioSidecarInject, util.AnnotationValueIstioSidecarInjectDisabled)
+ }
// Append provided parameter
workflow.OverrideParameters(parameters)
// Add label to the workflow so it can be persisted by persistent agent later.
@@ -282,7 +303,7 @@ func (r *ResourceManager) CreateRun(apiRun *api.Run) (*model.RunDetail, error) {
}
// Create argo workflow CRD resource
- newWorkflow, err := r.workflowClient.Create(workflow.Get())
+ newWorkflow, err := r.getWorkflowClient(common.GetNamespaceFromAPIResourceReferences(apiRun.ResourceReferences)).Create(workflow.Get())
if err != nil {
return nil, util.NewInternalServerError(err, "Failed to create a workflow for (%s)", workflow.Name)
}
@@ -329,7 +350,11 @@ func (r *ResourceManager) DeleteRun(runID string) error {
if err != nil {
return util.Wrap(err, "Delete run failed")
}
- err = r.workflowClient.Delete(runDetail.Name, &v1.DeleteOptions{})
+ namespace, err := r.GetNamespaceFromRunID(runID)
+ if err != nil {
+ return util.Wrap(err, "Delete run failed")
+ }
+ err = r.getWorkflowClient(namespace).Delete(runDetail.Name, &v1.DeleteOptions{})
if err != nil {
// API won't need to delete the workflow CRD
// once persistent agent sync the state to DB and set TTL for it.
@@ -375,12 +400,17 @@ func (r *ResourceManager) TerminateRun(runId string) error {
return util.Wrap(err, "Terminate run failed")
}
+ namespace, err := r.GetNamespaceFromRunID(runId)
+ if err != nil {
+ return util.Wrap(err, "Terminate run failed")
+ }
+
err = r.runStore.TerminateRun(runId)
if err != nil {
return util.Wrap(err, "Terminate run failed")
}
- err = TerminateWorkflow(r.workflowClient, runDetail.Run.Name)
+ err = TerminateWorkflow(r.getWorkflowClient(namespace), runDetail.Run.Name)
if err != nil {
return util.NewInternalServerError(err, "Failed to terminate the run")
}
@@ -392,6 +422,10 @@ func (r *ResourceManager) RetryRun(runId string) error {
if err != nil {
return util.Wrap(err, "Retry run failed")
}
+ namespace, err := r.GetNamespaceFromRunID(runId)
+ if err != nil {
+ return util.Wrap(err, "Retry run failed")
+ }
if runDetail.WorkflowRuntimeManifest == "" {
return util.NewBadRequestError(errors.New("workflow cannot be retried"), "Workflow must be Failed/Error to retry")
@@ -406,16 +440,16 @@ func (r *ResourceManager) RetryRun(runId string) error {
return util.Wrap(err, "Retry run failed.")
}
- if err = deletePods(r.podClient, podsToDelete); err != nil {
+ if err = deletePods(r.k8sCoreClient, podsToDelete, namespace); err != nil {
return util.NewInternalServerError(err, "Retry run failed. Failed to clean up the failed pods from previous run.")
}
// First try to update workflow
- updateError := r.updateWorkflow(newWorkflow)
+ updateError := r.updateWorkflow(newWorkflow, namespace)
if updateError != nil {
// Remove resource version
newWorkflow.ResourceVersion = ""
- newCreatedWorkflow, createError := r.workflowClient.Create(newWorkflow.Workflow)
+ newCreatedWorkflow, createError := r.getWorkflowClient(namespace).Create(newWorkflow.Workflow)
if createError != nil {
return util.NewInternalServerError(createError,
"Retry run failed. Failed to create or update the run. Update Error: %s, Create Error: %s",
@@ -430,15 +464,15 @@ func (r *ResourceManager) RetryRun(runId string) error {
return nil
}
-func (r *ResourceManager) updateWorkflow(newWorkflow *util.Workflow) error {
+func (r *ResourceManager) updateWorkflow(newWorkflow *util.Workflow, namespace string) error {
// If fail to get the workflow, return error.
- latestWorkflow, err := r.workflowClient.Get(newWorkflow.Name, v1.GetOptions{})
+ latestWorkflow, err := r.getWorkflowClient(namespace).Get(newWorkflow.Name, v1.GetOptions{})
if err != nil {
return err
}
// Update the workflow's resource version to latest.
newWorkflow.ResourceVersion = latestWorkflow.ResourceVersion
- _, err = r.workflowClient.Update(newWorkflow.Workflow)
+ _, err = r.getWorkflowClient(namespace).Update(newWorkflow.Workflow)
return err
}
@@ -566,7 +600,7 @@ func (r *ResourceManager) ReportWorkflowResource(workflow *util.Workflow) error
if workflow.PersistedFinalState() {
// If workflow's final state has being persisted, the workflow should be garbage collected.
- err := r.workflowClient.Delete(workflow.Name, &v1.DeleteOptions{})
+ err := r.getWorkflowClient(workflow.Namespace).Delete(workflow.Name, &v1.DeleteOptions{})
if err != nil {
return util.NewInternalServerError(err, "Failed to delete the completed workflow for run %s", runId)
}
@@ -634,7 +668,7 @@ func (r *ResourceManager) ReportWorkflowResource(workflow *util.Workflow) error
}
if workflow.IsInFinalState() {
- err := AddWorkflowLabel(r.workflowClient, workflow.Name, util.LabelKeyWorkflowPersistedFinalState, "true")
+ err := AddWorkflowLabel(r.getWorkflowClient(workflow.Namespace), workflow.Name, util.LabelKeyWorkflowPersistedFinalState, "true")
if err != nil {
return util.Wrap(err, "Failed to add PersistedFinalState label to workflow")
}
@@ -945,3 +979,16 @@ func (r *ResourceManager) GetPipelineVersionTemplate(versionId string) ([]byte,
return template, nil
}
+
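+// IsRequestAuthorized asks KFAM whether the given user identity may access the given namespace.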
+func (r *ResourceManager) IsRequestAuthorized(userIdentity string, namespace string) (bool, error) {
+ return r.kfamClient.IsAuthorized(userIdentity, namespace)
+}
+
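+// GetNamespaceFromRunID looks up a run and returns the namespace recorded in its resource references.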
+func (r *ResourceManager) GetNamespaceFromRunID(runId string) (string, error) {
+ runDetail, err := r.GetRun(runId)
+ if err != nil {
+ return "", util.Wrap(err, "Failed to get namespace from run id.")
+ }
+ namespace := model.GetNamespaceFromModelResourceReferences(runDetail.ResourceReferences)
+ return namespace, nil
+}
diff --git a/backend/src/apiserver/resource/resource_manager_test.go b/backend/src/apiserver/resource/resource_manager_test.go
index 964fa3f79f0..f915425ba94 100644
--- a/backend/src/apiserver/resource/resource_manager_test.go
+++ b/backend/src/apiserver/resource/resource_manager_test.go
@@ -15,15 +15,15 @@
package resource
import (
+ "encoding/json"
"fmt"
"strings"
"testing"
"time"
- "encoding/json"
-
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/client"
"github.com/kubeflow/pipelines/backend/src/apiserver/common"
"github.com/kubeflow/pipelines/backend/src/apiserver/model"
"github.com/kubeflow/pipelines/backend/src/apiserver/storage"
@@ -329,7 +329,7 @@ func TestCreateRun_ThroughPipelineID(t *testing.T) {
},
}
assert.Equal(t, expectedRunDetail, runDetail, "The CreateRun return has unexpected value.")
- assert.Equal(t, 1, store.workflowClientFake.GetWorkflowCount(), "Workflow CRD is not created.")
+ assert.Equal(t, 1, store.ArgoClientFake.GetWorkflowCount(), "Workflow CRD is not created.")
runDetail, err = manager.GetRun(runDetail.UUID)
assert.Nil(t, err)
assert.Equal(t, expectedRunDetail, runDetail, "CreateRun stored invalid data in database")
@@ -370,7 +370,7 @@ func TestCreateRun_ThroughWorkflowSpec(t *testing.T) {
},
}
assert.Equal(t, expectedRunDetail, runDetail, "The CreateRun return has unexpected value.")
- assert.Equal(t, 1, store.workflowClientFake.GetWorkflowCount(), "Workflow CRD is not created.")
+ assert.Equal(t, 1, store.ArgoClientFake.GetWorkflowCount(), "Workflow CRD is not created.")
runDetail, err := manager.GetRun(runDetail.UUID)
assert.Nil(t, err)
assert.Equal(t, expectedRunDetail, runDetail, "CreateRun stored invalid data in database")
@@ -460,7 +460,7 @@ func TestCreateRun_ThroughPipelineVersion(t *testing.T) {
},
}
assert.Equal(t, expectedRunDetail, runDetail, "The CreateRun return has unexpected value.")
- assert.Equal(t, 1, store.workflowClientFake.GetWorkflowCount(), "Workflow CRD is not created.")
+ assert.Equal(t, 1, store.ArgoClientFake.GetWorkflowCount(), "Workflow CRD is not created.")
runDetail, err = manager.GetRun(runDetail.UUID)
assert.Nil(t, err)
assert.Equal(t, expectedRunDetail, runDetail, "CreateRun stored invalid data in database")
@@ -554,7 +554,7 @@ func TestCreateRun_CreateWorkflowError(t *testing.T) {
store := NewFakeClientManagerOrFatal(util.NewFakeTimeForEpoch())
defer store.Close()
manager := NewResourceManager(store)
- manager.workflowClient = &FakeBadWorkflowClient{}
+ manager.argoClient = client.NewFakeArgoClientWithBadWorkflow()
apiRun := &api.Run{
Name: "run1",
PipelineSpec: &api.PipelineSpec{
@@ -612,7 +612,7 @@ func TestDeleteRun_CrdFailure(t *testing.T) {
store, manager, runDetail := initWithOneTimeRun(t)
defer store.Close()
- manager.workflowClient = &FakeBadWorkflowClient{}
+ manager.argoClient = client.NewFakeArgoClientWithBadWorkflow()
err := manager.DeleteRun(runDetail.UUID)
//assert.Equal(t, codes.Internal, err.(*util.UserError).ExternalStatusCode())
//assert.Contains(t, err.Error(), "some error")
@@ -678,7 +678,7 @@ func TestDeleteExperiment_CrdFailure(t *testing.T) {
store, manager, experiment := initWithExperiment(t)
defer store.Close()
- manager.workflowClient = &FakeBadWorkflowClient{}
+ manager.argoClient = client.NewFakeArgoClientWithBadWorkflow()
err := manager.DeleteExperiment(experiment.UUID)
assert.Nil(t, err)
}
@@ -704,7 +704,7 @@ func TestTerminateRun(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, "Terminating", actualRunDetail.Conditions)
- isTerminated, err := store.workflowClientFake.isTerminated(runDetail.Run.Name)
+ isTerminated, err := store.ArgoClientFake.IsTerminated(runDetail.Run.Name)
assert.Nil(t, err)
assert.True(t, isTerminated)
}
@@ -757,7 +757,7 @@ func TestRetryRun_FailedDeletePods(t *testing.T) {
store, manager, runDetail := initWithOneTimeFailedRun(t)
defer store.Close()
- manager.podClient = FakeBadPodClient{}
+ manager.k8sCoreClient = client.NewFakeKubernetesCoreClientWithBadPodClient()
err := manager.RetryRun(runDetail.UUID)
assert.NotNil(t, err)
assert.Contains(t, err.Error(), "failed to delete pod")
@@ -767,7 +767,7 @@ func TestRetryRun_UpdateAndCreateFailed(t *testing.T) {
store, manager, runDetail := initWithOneTimeFailedRun(t)
defer store.Close()
- manager.workflowClient = &FakeBadWorkflowClient{}
+ manager.argoClient = client.NewFakeArgoClientWithBadWorkflow()
err := manager.RetryRun(runDetail.UUID)
assert.NotNil(t, err)
assert.Contains(t, err.Error(), "Failed to create or update the run")
@@ -1291,7 +1291,7 @@ func TestReportWorkflowResource_WorkflowCompleted(t *testing.T) {
err := manager.ReportWorkflowResource(workflow)
assert.Nil(t, err)
- wf, err := store.workflowClientFake.Get(run.Run.Name, v1.GetOptions{})
+ wf, err := store.ArgoClientFake.Workflow("").Get(run.Run.Name, v1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, wf.Labels[util.LabelKeyWorkflowPersistedFinalState], "true")
}
@@ -1314,7 +1314,7 @@ func TestReportWorkflowResource_WorkflowCompleted_FinalStatePersisted(t *testing
func TestReportWorkflowResource_WorkflowCompleted_FinalStatePersisted_DeleteFailed(t *testing.T) {
store, manager, run := initWithOneTimeRun(t)
- manager.workflowClient = &FakeBadWorkflowClient{}
+ manager.argoClient = client.NewFakeArgoClientWithBadWorkflow()
defer store.Close()
// report workflow
workflow := util.NewWorkflow(&v1alpha1.Workflow{
diff --git a/backend/src/apiserver/resource/resource_manager_util.go b/backend/src/apiserver/resource/resource_manager_util.go
index 406e6b1cb9f..2ea2372350e 100644
--- a/backend/src/apiserver/resource/resource_manager_util.go
+++ b/backend/src/apiserver/resource/resource_manager_util.go
@@ -16,18 +16,19 @@ package resource
import (
"errors"
+ "regexp"
+ "strings"
+ "time"
+
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo/workflow/common"
api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/client"
"github.com/kubeflow/pipelines/backend/src/common/util"
scheduledworkflow "github.com/kubeflow/pipelines/backend/src/crd/pkg/apis/scheduledworkflow/v1beta1"
apierr "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
- "regexp"
- "strings"
- "time"
)
func toCRDTrigger(apiTrigger *api.Trigger) *scheduledworkflow.Trigger {
@@ -171,9 +172,9 @@ func formulateRetryWorkflow(wf *util.Workflow) (*util.Workflow, []string, error)
return util.NewWorkflow(newWF), podsToDelete, nil
}
-func deletePods(podClient corev1.PodInterface, podsToDelete []string) error {
+func deletePods(k8sCoreClient client.KubernetesCoreInterface, podsToDelete []string, namespace string) error {
for _, podId := range podsToDelete {
- err := podClient.Delete(podId, &metav1.DeleteOptions{})
+ err := k8sCoreClient.PodClient(namespace).Delete(podId, &metav1.DeleteOptions{})
if err != nil && !apierr.IsNotFound(err) {
return util.NewInternalServerError(err, "Failed to delete pods.")
}
diff --git a/backend/src/apiserver/resource/resource_manager_util_test.go b/backend/src/apiserver/resource/resource_manager_util_test.go
index 8d94f2ac337..7623351fa2a 100644
--- a/backend/src/apiserver/resource/resource_manager_util_test.go
+++ b/backend/src/apiserver/resource/resource_manager_util_test.go
@@ -235,16 +235,16 @@ status:
`
var workflow util.Workflow
- err := yaml.Unmarshal([]byte( wf), &workflow)
+ err := yaml.Unmarshal([]byte(wf), &workflow)
assert.Nil(t, err)
newWf, nodes, err := formulateRetryWorkflow(&workflow)
newWfString, err := yaml.Marshal(newWf)
assert.Nil(t, err)
- assert.Equal(t, []string{"resubmit-hl9ft-3879090716"},nodes)
+ assert.Equal(t, []string{"resubmit-hl9ft-3879090716"}, nodes)
expectedNewWfString :=
- `apiVersion: argoproj.io/v1alpha1
+ `apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
creationTimestamp: "2019-08-02T07:15:14Z"
@@ -319,7 +319,5 @@ status:
startedAt: "2019-08-02T07:15:14Z"
`
- assert.Equal(t, expectedNewWfString,string(newWfString))
+ assert.Equal(t, expectedNewWfString, string(newWfString))
}
-
-
diff --git a/backend/src/apiserver/server/BUILD.bazel b/backend/src/apiserver/server/BUILD.bazel
index 83c307b0b8a..2ea70466472 100644
--- a/backend/src/apiserver/server/BUILD.bazel
+++ b/backend/src/apiserver/server/BUILD.bazel
@@ -20,6 +20,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//backend/api:go_default_library",
+ "//backend/src/apiserver/client:go_default_library",
"//backend/src/apiserver/common:go_default_library",
"//backend/src/apiserver/list:go_default_library",
"//backend/src/apiserver/model:go_default_library",
@@ -29,12 +30,14 @@ go_library(
"@com_github_argoproj_argo//pkg/apis/workflow/v1alpha1:go_default_library",
"@com_github_golang_glog//:go_default_library",
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+ "@com_github_pkg_errors//:go_default_library",
"@com_github_robfig_cron//:go_default_library",
"@com_github_stretchr_testify//assert:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
+ "@org_golang_google_grpc//metadata:go_default_library",
],
)
@@ -66,10 +69,12 @@ go_test(
"//backend/src/crd/pkg/apis/scheduledworkflow/v1beta1:go_default_library",
"@com_github_argoproj_argo//pkg/apis/workflow/v1alpha1:go_default_library",
"@com_github_google_go_cmp//cmp:go_default_library",
+ "@com_github_spf13_viper//:go_default_library",
"@com_github_stretchr_testify//assert:go_default_library",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@io_k8s_apimachinery//pkg/apis/meta/v1:go_default_library",
"@io_k8s_apimachinery//pkg/types:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
+ "@org_golang_google_grpc//metadata:go_default_library",
],
)
diff --git a/backend/src/apiserver/server/api_converter.go b/backend/src/apiserver/server/api_converter.go
index 1a2074c424d..2eb38f52cf5 100644
--- a/backend/src/apiserver/server/api_converter.go
+++ b/backend/src/apiserver/server/api_converter.go
@@ -268,6 +268,8 @@ func toApiResourceType(modelType common.ResourceType) api.ResourceType {
return api.ResourceType_JOB
case common.PipelineVersion:
return api.ResourceType_PIPELINE_VERSION
+ case common.Namespace:
+ return api.ResourceType_NAMESPACE
default:
return api.ResourceType_UNKNOWN_RESOURCE_TYPE
}
diff --git a/backend/src/apiserver/server/job_server.go b/backend/src/apiserver/server/job_server.go
index 759826c80c1..c264afd781f 100644
--- a/backend/src/apiserver/server/job_server.go
+++ b/backend/src/apiserver/server/job_server.go
@@ -19,9 +19,11 @@ import (
"github.com/golang/protobuf/ptypes/empty"
api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
"github.com/kubeflow/pipelines/backend/src/apiserver/model"
"github.com/kubeflow/pipelines/backend/src/apiserver/resource"
"github.com/kubeflow/pipelines/backend/src/common/util"
+ "github.com/pkg/errors"
"github.com/robfig/cron"
)
@@ -30,6 +32,9 @@ type JobServer struct {
}
func (s *JobServer) CreateJob(ctx context.Context, request *api.CreateJobRequest) (*api.Job, error) {
+ if common.IsMultiUserMode() {
+ return nil, util.NewBadRequestError(errors.New("Job APIs are temporarily disabled in the multi-user mode until it is fully ready."), "Job APIs are temporarily disabled in the multi-user mode until it is fully ready.")
+ }
err := s.validateCreateJobRequest(request)
if err != nil {
return nil, err
@@ -76,6 +81,9 @@ func (s *JobServer) DisableJob(ctx context.Context, request *api.DisableJobReque
}
func (s *JobServer) DeleteJob(ctx context.Context, request *api.DeleteJobRequest) (*empty.Empty, error) {
+ if common.IsMultiUserMode() {
+ return nil, util.NewBadRequestError(errors.New("Job APIs are temporarily disabled in the multi-user mode until it is fully ready."), "Job APIs are temporarily disabled in the multi-user mode until it is fully ready.")
+ }
err := s.resourceManager.DeleteJob(request.Id)
if err != nil {
return nil, err
diff --git a/backend/src/apiserver/server/pipeline_upload_server.go b/backend/src/apiserver/server/pipeline_upload_server.go
index 001da385279..6ec06a27a7a 100644
--- a/backend/src/apiserver/server/pipeline_upload_server.go
+++ b/backend/src/apiserver/server/pipeline_upload_server.go
@@ -18,6 +18,7 @@ import (
"encoding/json"
"fmt"
"net/http"
+ "net/url"
"time"
"github.com/golang/glog"
@@ -28,8 +29,9 @@ import (
// These are valid conditions of a ScheduledWorkflow.
const (
- FormFileKey = "uploadfile"
- NameQueryStringKey = "name"
+ FormFileKey = "uploadfile"
+ NameQueryStringKey = "name"
+ DescriptionQueryStringKey = "description"
)
type PipelineUploadServer struct {
@@ -63,7 +65,13 @@ func (s *PipelineUploadServer) UploadPipeline(w http.ResponseWriter, r *http.Req
s.writeErrorToResponse(w, http.StatusBadRequest, util.Wrap(err, "Invalid pipeline name."))
return
}
- newPipeline, err := s.resourceManager.CreatePipeline(pipelineName, "", pipelineFile)
+ // We don't set a max length for the pipeline description here, since the description column in our DB is longtext.
+ pipelineDescription, err := url.QueryUnescape(r.URL.Query().Get(DescriptionQueryStringKey))
+ if err != nil {
+ s.writeErrorToResponse(w, http.StatusBadRequest, util.Wrap(err, "Error reading pipeline description."))
+ return
+ }
+ newPipeline, err := s.resourceManager.CreatePipeline(pipelineName, pipelineDescription, pipelineFile)
if err != nil {
s.writeErrorToResponse(w, http.StatusInternalServerError, util.Wrap(err, "Error creating pipeline"))
return
diff --git a/backend/src/apiserver/server/pipeline_upload_server_test.go b/backend/src/apiserver/server/pipeline_upload_server_test.go
index 2cf127d5371..68beb82c395 100644
--- a/backend/src/apiserver/server/pipeline_upload_server_test.go
+++ b/backend/src/apiserver/server/pipeline_upload_server_test.go
@@ -225,3 +225,53 @@ func TestUploadPipeline_FileNameTooLong(t *testing.T) {
assert.Equal(t, 400, rr.Code)
assert.Contains(t, string(rr.Body.Bytes()), "Pipeline name too long")
}
+
+func TestUploadPipeline_SpecifyFileDescription(t *testing.T) {
+ clientManager := resource.NewFakeClientManagerOrFatal(util.NewFakeTimeForEpoch())
+ resourceManager := resource.NewResourceManager(clientManager)
+ server := PipelineUploadServer{resourceManager: resourceManager}
+ b := &bytes.Buffer{}
+ w := multipart.NewWriter(b)
+ part, _ := w.CreateFormFile("uploadfile", "hello-world.yaml")
+ io.Copy(part, bytes.NewBufferString("apiVersion: argoproj.io/v1alpha1\nkind: Workflow"))
+ w.Close()
+ req, _ := http.NewRequest("POST", fmt.Sprintf("/apis/v1beta1/pipelines/upload?name=%s&description=%s", url.PathEscape("foo bar"), url.PathEscape("description of foo bar")), bytes.NewReader(b.Bytes()))
+ req.Header.Set("Content-Type", w.FormDataContentType())
+
+ rr := httptest.NewRecorder()
+ handler := http.HandlerFunc(server.UploadPipeline)
+ handler.ServeHTTP(rr, req)
+ assert.Equal(t, 200, rr.Code)
+
+ // Verify stored in object store
+ template, err := clientManager.ObjectStore().GetFile(storage.CreatePipelinePath(resource.DefaultFakeUUID))
+ assert.Nil(t, err)
+ assert.NotNil(t, template)
+
+ opts, err := list.NewOptions(&model.Pipeline{}, 2, "", nil)
+ assert.Nil(t, err)
+ // Verify metadata in db
+ pkgsExpect := []*model.Pipeline{
+ {
+ UUID: resource.DefaultFakeUUID,
+ CreatedAtInSec: 1,
+ Name: "foo bar",
+ Parameters: "[]",
+ Status: model.PipelineReady,
+ DefaultVersionId: resource.DefaultFakeUUID,
+ DefaultVersion: &model.PipelineVersion{
+ UUID: resource.DefaultFakeUUID,
+ CreatedAtInSec: 1,
+ Name: "foo bar",
+ Parameters: "[]",
+ Status: model.PipelineVersionReady,
+ PipelineId: resource.DefaultFakeUUID,
+ },
+ Description: "description of foo bar",
+ }}
+ pkg, total_size, str, err := clientManager.PipelineStore().ListPipelines(opts)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, total_size)
+ assert.Equal(t, str, "")
+ assert.Equal(t, pkgsExpect, pkg)
+}
diff --git a/backend/src/apiserver/server/run_server.go b/backend/src/apiserver/server/run_server.go
index 9d09c23dbdb..ef4541fff41 100644
--- a/backend/src/apiserver/server/run_server.go
+++ b/backend/src/apiserver/server/run_server.go
@@ -19,9 +19,11 @@ import (
"github.com/golang/protobuf/ptypes/empty"
api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
"github.com/kubeflow/pipelines/backend/src/apiserver/model"
"github.com/kubeflow/pipelines/backend/src/apiserver/resource"
"github.com/kubeflow/pipelines/backend/src/common/util"
+ "github.com/pkg/errors"
)
type RunServer struct {
@@ -33,6 +35,11 @@ func (s *RunServer) CreateRun(ctx context.Context, request *api.CreateRunRequest
if err != nil {
return nil, util.Wrap(err, "Validate create run request failed.")
}
+ err = CanAccessNamespaceInResourceReferences(s.resourceManager, ctx, request.Run.ResourceReferences)
+ if err != nil {
+ return nil, util.Wrap(err, "Failed to authorize the request.")
+ }
+
run, err := s.resourceManager.CreateRun(request.Run)
if err != nil {
return nil, util.Wrap(err, "Failed to create a new run.")
@@ -67,7 +74,11 @@ func (s *RunServer) ListRuns(ctx context.Context, request *api.ListRunsRequest)
}
func (s *RunServer) ArchiveRun(ctx context.Context, request *api.ArchiveRunRequest) (*empty.Empty, error) {
- err := s.resourceManager.ArchiveRun(request.Id)
+ err := s.canAccessRun(ctx, request.Id)
+ if err != nil {
+ return nil, util.Wrap(err, "Failed to authorize the request.")
+ }
+ err = s.resourceManager.ArchiveRun(request.Id)
if err != nil {
return nil, err
}
@@ -75,7 +86,11 @@ func (s *RunServer) ArchiveRun(ctx context.Context, request *api.ArchiveRunReque
}
func (s *RunServer) UnarchiveRun(ctx context.Context, request *api.UnarchiveRunRequest) (*empty.Empty, error) {
- err := s.resourceManager.UnarchiveRun(request.Id)
+ err := s.canAccessRun(ctx, request.Id)
+ if err != nil {
+ return nil, util.Wrap(err, "Failed to authorize the request.")
+ }
+ err = s.resourceManager.UnarchiveRun(request.Id)
if err != nil {
return nil, err
}
@@ -83,7 +98,11 @@ func (s *RunServer) UnarchiveRun(ctx context.Context, request *api.UnarchiveRunR
}
func (s *RunServer) DeleteRun(ctx context.Context, request *api.DeleteRunRequest) (*empty.Empty, error) {
- err := s.resourceManager.DeleteRun(request.Id)
+ err := s.canAccessRun(ctx, request.Id)
+ if err != nil {
+ return nil, util.Wrap(err, "Failed to authorize the request.")
+ }
+ err = s.resourceManager.DeleteRun(request.Id)
if err != nil {
return nil, err
}
@@ -138,7 +157,11 @@ func (s *RunServer) validateCreateRunRequest(request *api.CreateRunRequest) erro
}
func (s *RunServer) TerminateRun(ctx context.Context, request *api.TerminateRunRequest) (*empty.Empty, error) {
- err := s.resourceManager.TerminateRun(request.RunId)
+ err := s.canAccessRun(ctx, request.RunId)
+ if err != nil {
+ return nil, util.Wrap(err, "Failed to authorize the request.")
+ }
+ err = s.resourceManager.TerminateRun(request.RunId)
if err != nil {
return nil, err
}
@@ -146,7 +169,11 @@ func (s *RunServer) TerminateRun(ctx context.Context, request *api.TerminateRunR
}
func (s *RunServer) RetryRun(ctx context.Context, request *api.RetryRunRequest) (*empty.Empty, error) {
- err := s.resourceManager.RetryRun(request.RunId)
+ err := s.canAccessRun(ctx, request.RunId)
+ if err != nil {
+ return nil, util.Wrap(err, "Failed to authorize the request.")
+ }
+ err = s.resourceManager.RetryRun(request.RunId)
if err != nil {
return nil, err
}
@@ -154,6 +181,26 @@ func (s *RunServer) RetryRun(ctx context.Context, request *api.RetryRunRequest)
}
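+// canAccessRun authorizes the caller against the namespace that owns the given run. It is a no-op when multi-user mode is disabled.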
+func (s *RunServer) canAccessRun(ctx context.Context, runId string) error {
+ if !common.IsMultiUserMode() {
+ // Skip authz if not multi-user mode.
+ return nil
+ }
+ namespace, err := s.resourceManager.GetNamespaceFromRunID(runId)
+ if err != nil {
+ return util.Wrap(err, "Failed to authorize with the run Id.")
+ }
+ if len(namespace) == 0 {
+ return util.NewInternalServerError(errors.New("There is no namespace found"), "There is no namespace found")
+ }
+
+ err = isAuthorized(s.resourceManager, ctx, namespace)
+ if err != nil {
+ return util.Wrap(err, "Failed to authorize with API resource references")
+ }
+ return nil
+}
+
func NewRunServer(resourceManager *resource.ResourceManager) *RunServer {
return &RunServer{resourceManager: resourceManager}
}
diff --git a/backend/src/apiserver/server/run_server_test.go b/backend/src/apiserver/server/run_server_test.go
index 44dac875b13..a516ff1fcd9 100644
--- a/backend/src/apiserver/server/run_server_test.go
+++ b/backend/src/apiserver/server/run_server_test.go
@@ -7,9 +7,12 @@ import (
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/golang/protobuf/ptypes/timestamp"
api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
"github.com/kubeflow/pipelines/backend/src/common/util"
+ "github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
)
func TestCreateRun(t *testing.T) {
@@ -58,6 +61,22 @@ func TestCreateRun(t *testing.T) {
assert.Equal(t, expectedRunDetail, *runDetail)
}
+func TestCreateRun_Unauthorized(t *testing.T) {
+ clients, manager, _ := initWithExperiment_KFAM_Unauthorized(t)
+ defer clients.Close()
+ server := NewRunServer(manager)
+ run := &api.Run{
+ Name: "123",
+ ResourceReferences: validReference,
+ PipelineSpec: &api.PipelineSpec{
+ WorkflowManifest: testWorkflow.ToStringForStore(),
+ Parameters: []*api.Parameter{{Name: "param1", Value: "world"}},
+ },
+ }
+ _, err := server.CreateRun(nil, &api.CreateRunRequest{Run: run})
+ assert.Nil(t, err)
+}
+
func TestListRun(t *testing.T) {
clients, manager, experiment := initWithExperiment(t)
defer clients.Close()
@@ -302,3 +321,69 @@ func TestReportRunMetrics_PartialFailures(t *testing.T) {
}
assert.Equal(t, expectedResponse, response)
}
+
+func TestCanAccessRun_Unauthorized(t *testing.T) {
+ clients, manager, experiment := initWithExperiment_KFAM_Unauthorized(t)
+ defer clients.Close()
+ runServer := RunServer{resourceManager: manager}
+ viper.Set(common.MultiUserMode, "true")
+ md := metadata.New(map[string]string{common.GoogleIAPUserIdentityHeader: "accounts.google.com:user@google.com"})
+ ctx := metadata.NewIncomingContext(context.Background(), md)
+
+ apiRun := &api.Run{
+ Name: "run1",
+ PipelineSpec: &api.PipelineSpec{
+ WorkflowManifest: testWorkflow.ToStringForStore(),
+ Parameters: []*api.Parameter{
+ {Name: "param1", Value: "world"},
+ },
+ },
+ ResourceReferences: []*api.ResourceReference{
+ {
+ Key: &api.ResourceKey{Type: api.ResourceType_NAMESPACE, Id: "ns"},
+ Relationship: api.Relationship_OWNER,
+ },
+ {
+ Key: &api.ResourceKey{Type: api.ResourceType_EXPERIMENT, Id: experiment.UUID},
+ Relationship: api.Relationship_OWNER,
+ },
+ },
+ }
+ runDetail, _ := manager.CreateRun(apiRun)
+
+ err := runServer.canAccessRun(ctx, runDetail.UUID)
+ assert.NotNil(t, err)
+}
+
+func TestCanAccessRun_Authorized(t *testing.T) {
+ clients, manager, experiment := initWithExperiment(t)
+ defer clients.Close()
+ runServer := RunServer{resourceManager: manager}
+ viper.Set(common.MultiUserMode, "true")
+ md := metadata.New(map[string]string{common.GoogleIAPUserIdentityHeader: "accounts.google.com:user@google.com"})
+ ctx := metadata.NewIncomingContext(context.Background(), md)
+
+ apiRun := &api.Run{
+ Name: "run1",
+ PipelineSpec: &api.PipelineSpec{
+ WorkflowManifest: testWorkflow.ToStringForStore(),
+ Parameters: []*api.Parameter{
+ {Name: "param1", Value: "world"},
+ },
+ },
+ ResourceReferences: []*api.ResourceReference{
+ {
+ Key: &api.ResourceKey{Type: api.ResourceType_NAMESPACE, Id: "ns"},
+ Relationship: api.Relationship_OWNER,
+ },
+ {
+ Key: &api.ResourceKey{Type: api.ResourceType_EXPERIMENT, Id: experiment.UUID},
+ Relationship: api.Relationship_OWNER,
+ },
+ },
+ }
+ runDetail, _ := manager.CreateRun(apiRun)
+
+ err := runServer.canAccessRun(ctx, runDetail.UUID)
+ assert.Nil(t, err)
+}
diff --git a/backend/src/apiserver/server/test_util.go b/backend/src/apiserver/server/test_util.go
index 890e992ce97..a130f1e80c8 100644
--- a/backend/src/apiserver/server/test_util.go
+++ b/backend/src/apiserver/server/test_util.go
@@ -19,6 +19,7 @@ import (
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/client"
"github.com/kubeflow/pipelines/backend/src/apiserver/model"
"github.com/kubeflow/pipelines/backend/src/apiserver/resource"
"github.com/kubeflow/pipelines/backend/src/common/util"
@@ -73,6 +74,16 @@ func initWithExperiment(t *testing.T) (*resource.FakeClientManager, *resource.Re
return clientManager, resourceManager, experiment
}
+func initWithExperiment_KFAM_Unauthorized(t *testing.T) (*resource.FakeClientManager, *resource.ResourceManager, *model.Experiment) {
+ clientManager := resource.NewFakeClientManagerOrFatal(util.NewFakeTimeForEpoch())
+ clientManager.KfamClientFake = client.NewFakeKFAMClientUnauthorized()
+ resourceManager := resource.NewResourceManager(clientManager)
+ experiment := &model.Experiment{Name: "123"}
+ experiment, err := resourceManager.CreateExperiment(experiment)
+ assert.Nil(t, err)
+ return clientManager, resourceManager, experiment
+}
+
func initWithExperimentAndPipelineVersion(t *testing.T) (*resource.FakeClientManager, *resource.ResourceManager, *model.Experiment) {
clientManager := resource.NewFakeClientManagerOrFatal(util.NewFakeTimeForEpoch())
resourceManager := resource.NewResourceManager(clientManager)
diff --git a/backend/src/apiserver/server/util.go b/backend/src/apiserver/server/util.go
index bffd1d52960..94938693aab 100644
--- a/backend/src/apiserver/server/util.go
+++ b/backend/src/apiserver/server/util.go
@@ -6,15 +6,21 @@ import (
"bufio"
"bytes"
"compress/gzip"
+ "context"
"encoding/json"
-
- api "github.com/kubeflow/pipelines/backend/api/go_client"
- "github.com/kubeflow/pipelines/backend/src/apiserver/resource"
- "github.com/kubeflow/pipelines/backend/src/common/util"
"io"
"io/ioutil"
"net/url"
+ "strconv"
"strings"
+
+ "github.com/golang/glog"
+ api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/resource"
+ "github.com/kubeflow/pipelines/backend/src/common/util"
+ "github.com/pkg/errors"
+ "google.golang.org/grpc/metadata"
)
// These are valid conditions of a ScheduledWorkflow.
@@ -268,3 +274,69 @@ func CheckPipelineVersionReference(resourceManager *resource.ResourceManager, re
return &pipelineVersionId, nil
}
+
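+// getUserIdentity extracts the user email from the Google IAP user identity header in the incoming gRPC metadata.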
+func getUserIdentity(ctx context.Context) (string, error) {
+ if ctx == nil {
+ return "", util.NewBadRequestError(errors.New("Request error: context is nil"), "Request error: context is nil.")
+ }
+ md, _ := metadata.FromIncomingContext(ctx)
+ // If the request header contains the user identity, requests are authorized
+ // based on the namespace field in the request.
+ if userIdentityHeader, ok := md[common.GoogleIAPUserIdentityHeader]; ok {
+ if len(userIdentityHeader) != 1 {
+ return "", util.NewBadRequestError(errors.New("Request header error: unexpected number of user identity headers. Expected 1, got "+strconv.Itoa(len(userIdentityHeader))),
+ "Request header error: unexpected number of user identity headers. Expected 1, got "+strconv.Itoa(len(userIdentityHeader)))
+ }
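+ // The IAP user identity header value has the form "<provider>:<email>", e.g. "accounts.google.com:user@google.com"; keep only the email part.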
+ userIdentityHeaderFields := strings.Split(userIdentityHeader[0], ":")
+ if len(userIdentityHeaderFields) != 2 {
+ return "", util.NewBadRequestError(errors.New("Request header error: user identity value is incorrectly formatted"),
+ "Request header error: user identity value is incorrectly formatted")
+ }
+ return userIdentityHeaderFields[1], nil
+ }
+ return "", util.NewBadRequestError(errors.New("Request header error: there is no user identity header."), "Request header error: there is no user identity header.")
+}
+
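+// CanAccessNamespaceInResourceReferences authorizes the caller against the namespace named in the request's resource references. It is a no-op when multi-user mode is disabled.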
+func CanAccessNamespaceInResourceReferences(resourceManager *resource.ResourceManager, ctx context.Context, resourceRefs []*api.ResourceReference) error {
+ if !common.IsMultiUserMode() {
+ // Skip authz if not multi-user mode.
+ return nil
+ }
+
+ namespace := common.GetNamespaceFromAPIResourceReferences(resourceRefs)
+ if len(namespace) == 0 {
+ return util.NewBadRequestError(errors.New("Namespace required in Kubeflow deployment for authorization."), "Namespace required in Kubeflow deployment for authorization.")
+ }
+ err := isAuthorized(resourceManager, ctx, namespace)
+ if err != nil {
+ return util.Wrap(err, "Failed to authorize with API resource references")
+ }
+ return nil
+}
+
+// isAuthorized verifies whether the user identity, which is contained in the context object,
+// can access the target namespace. If the returned error is nil, the authorization passes;
+// otherwise, authorization fails with a non-nil error.
+func isAuthorized(resourceManager *resource.ResourceManager, ctx context.Context, namespace string) error {
+ userIdentity, err := getUserIdentity(ctx)
+ if err != nil {
+ return util.Wrap(err, "Bad request.")
+ }
+
+ if len(userIdentity) == 0 {
+ return util.NewBadRequestError(errors.New("Request header error: user identity is empty."), "Request header error: user identity is empty.")
+ }
+
+ isAuthorized, err := resourceManager.IsRequestAuthorized(userIdentity, namespace)
+ if err != nil {
+ return util.Wrap(err, "Authorization failure.")
+ }
+
+ if !isAuthorized {
+ glog.Infof("Unauthorized access for %s to namespace %s", userIdentity, namespace)
+ return util.NewBadRequestError(errors.New("Unauthorized access for "+userIdentity+" to namespace "+namespace), "Unauthorized access for "+userIdentity+" to namespace "+namespace)
+ }
+
+ glog.Infof("Authorized user %s in namespace %s", userIdentity, namespace)
+ return nil
+}
diff --git a/backend/src/apiserver/server/util_test.go b/backend/src/apiserver/server/util_test.go
index e76473202d1..cb9515d3a91 100644
--- a/backend/src/apiserver/server/util_test.go
+++ b/backend/src/apiserver/server/util_test.go
@@ -1,15 +1,19 @@
package server
import (
+ "context"
"io/ioutil"
"os"
"strings"
"testing"
api "github.com/kubeflow/pipelines/backend/api/go_client"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
"github.com/kubeflow/pipelines/backend/src/apiserver/resource"
"github.com/kubeflow/pipelines/backend/src/common/util"
+ "github.com/spf13/viper"
"github.com/stretchr/testify/assert"
+ "google.golang.org/grpc/metadata"
)
func TestGetPipelineName_QueryStringNotEmpty(t *testing.T) {
@@ -332,3 +336,45 @@ func TestValidatePipelineSpec_ParameterTooLong(t *testing.T) {
assert.NotNil(t, err)
assert.Contains(t, err.Error(), "The input parameter length exceed maximum size")
}
+
+func TestGetUserIdentity(t *testing.T) {
+ md := metadata.New(map[string]string{common.GoogleIAPUserIdentityHeader: "accounts.google.com:user@google.com"})
+ ctx := metadata.NewIncomingContext(context.Background(), md)
+ userIdentity, err := getUserIdentity(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, "user@google.com", userIdentity)
+}
+
+func TestCanAccessNamespaceInResourceReferencesUnauthorized(t *testing.T) {
+ clients, manager, _ := initWithExperiment_KFAM_Unauthorized(t)
+ defer clients.Close()
+ viper.Set(common.MultiUserMode, "true")
+ md := metadata.New(map[string]string{common.GoogleIAPUserIdentityHeader: "accounts.google.com:user@google.com"})
+ ctx := metadata.NewIncomingContext(context.Background(), md)
+ references := []*api.ResourceReference{
+ {
+ Key: &api.ResourceKey{
+ Type: api.ResourceType_NAMESPACE, Id: "ns"},
+ Relationship: api.Relationship_OWNER,
+ },
+ }
+ err := CanAccessNamespaceInResourceReferences(manager, ctx, references)
+ assert.NotNil(t, err)
+}
+
+func TestCanAccessNamespaceInResourceReferences_Authorized(t *testing.T) {
+ clients, manager, _ := initWithExperiment(t)
+ defer clients.Close()
+ viper.Set(common.MultiUserMode, "true")
+ md := metadata.New(map[string]string{common.GoogleIAPUserIdentityHeader: "accounts.google.com:user@google.com"})
+ ctx := metadata.NewIncomingContext(context.Background(), md)
+ references := []*api.ResourceReference{
+ {
+ Key: &api.ResourceKey{
+ Type: api.ResourceType_NAMESPACE, Id: "ns"},
+ Relationship: api.Relationship_OWNER,
+ },
+ }
+ err := CanAccessNamespaceInResourceReferences(manager, ctx, references)
+ assert.Nil(t, err)
+}
diff --git a/backend/src/apiserver/server/visualization_server.go b/backend/src/apiserver/server/visualization_server.go
index abaf560f7ad..36fdeb254df 100644
--- a/backend/src/apiserver/server/visualization_server.go
+++ b/backend/src/apiserver/server/visualization_server.go
@@ -7,11 +7,14 @@ import (
"github.com/golang/glog"
"github.com/kubeflow/pipelines/backend/api/go_client"
"github.com/kubeflow/pipelines/backend/src/apiserver/resource"
+ "github.com/kubeflow/pipelines/backend/src/apiserver/common"
"github.com/kubeflow/pipelines/backend/src/common/util"
"io/ioutil"
"net/http"
"net/url"
"strings"
+
+ "github.com/pkg/errors"
)
type VisualizationServer struct {
@@ -20,6 +23,9 @@ type VisualizationServer struct {
}
func (s *VisualizationServer) CreateVisualization(ctx context.Context, request *go_client.CreateVisualizationRequest) (*go_client.Visualization, error) {
+ if common.IsMultiUserMode() {
+ return nil, util.NewBadRequestError(errors.New("Visualization APIs are temporarily disabled in the multi-user mode until it is fully ready."), "Visualization APIs are temporarily disabled in the multi-user mode until it is fully ready.")
+ }
if err := s.validateCreateVisualizationRequest(request); err != nil {
return nil, err
}
diff --git a/backend/src/apiserver/storage/resource_reference_store.go b/backend/src/apiserver/storage/resource_reference_store.go
index 3a43dbfa32e..79a65817b0f 100644
--- a/backend/src/apiserver/storage/resource_reference_store.go
+++ b/backend/src/apiserver/storage/resource_reference_store.go
@@ -64,6 +64,10 @@ func (s *ResourceReferenceStore) checkReferenceExist(tx *sql.Tx, referenceId str
selectBuilder = sq.Select("1").From("experiments").Where(sq.Eq{"uuid": referenceId})
case common.PipelineVersion:
selectBuilder = sq.Select("1").From("pipeline_versions").Where(sq.Eq{"uuid": referenceId})
+ case common.Namespace:
+ // This function checks data validity when the data is transformed according to the DB schema.
+ // Namespaces are not stored in a separate table, so the reference check always passes for them.
+ return true
default:
return false
}
diff --git a/backend/src/common/util/consts.go b/backend/src/common/util/consts.go
index e51dc4fa94b..c1253a893e1 100644
--- a/backend/src/common/util/consts.go
+++ b/backend/src/common/util/consts.go
@@ -43,4 +43,8 @@ const (
LabelKeyWorkflowRunId = "pipeline/runid"
LabelKeyWorkflowPersistedFinalState = "pipeline/persistedFinalState"
+
+ AnnotationKeyIstioSidecarInject = "sidecar.istio.io/inject"
+ AnnotationValueIstioSidecarInjectEnabled = "true"
+ AnnotationValueIstioSidecarInjectDisabled = "false"
)
diff --git a/backend/src/common/util/workflow.go b/backend/src/common/util/workflow.go
index dc5acb967d6..10ac6def279 100644
--- a/backend/src/common/util/workflow.go
+++ b/backend/src/common/util/workflow.go
@@ -186,6 +186,19 @@ func (w *Workflow) OverrideName(name string) {
w.Name = name
}
+// SetAnnotationsToAllTemplates sets an annotation on every template in a Workflow.
+func (w *Workflow) SetAnnotationsToAllTemplates(key string, value string) {
+ if len(w.Spec.Templates) == 0 {
+ return
+ }
+ for index := range w.Spec.Templates {
+ if w.Spec.Templates[index].Metadata.Annotations == nil {
+ w.Spec.Templates[index].Metadata.Annotations = make(map[string]string)
+ }
+ w.Spec.Templates[index].Metadata.Annotations[key] = value
+ }
+}
+
// SetOwnerReferences sets owner references on a Workflow.
func (w *Workflow) SetOwnerReferences(schedule *swfapi.ScheduledWorkflow) {
w.OwnerReferences = []metav1.OwnerReference{
diff --git a/backend/src/crd/controller/viewer/reconciler/reconciler.go b/backend/src/crd/controller/viewer/reconciler/reconciler.go
index d7d67771426..50aec085494 100644
--- a/backend/src/crd/controller/viewer/reconciler/reconciler.go
+++ b/backend/src/crd/controller/viewer/reconciler/reconciler.go
@@ -24,6 +24,7 @@ package reconciler
import (
"context"
"fmt"
+ "strings"
"github.com/golang/glog"
viewerV1beta1 "github.com/kubeflow/pipelines/backend/src/crd/pkg/apis/viewer/v1beta1"
@@ -180,6 +181,11 @@ func setPodSpecForTensorboard(view *viewerV1beta1.Viewer, s *corev1.PodSpec) {
// when https://github.com/kubeflow/pipelines/issues/2514 is done
// "--bind_all",
}
+
+ if !strings.HasPrefix(view.Spec.TensorboardSpec.TensorflowImage, `tensorflow/tensorflow:1.`) {
+ c.Args = append(c.Args, "--bind_all")
+ }
+
c.Ports = []corev1.ContainerPort{
corev1.ContainerPort{ContainerPort: viewerTargetPort},
}
@@ -282,5 +288,5 @@ func (r *Reconciler) maybeDeleteOldestViewer(t viewerV1beta1.ViewerType, namespa
}
}
- return r.Client.Delete(context.Background(), oldest, nil)
+ return r.Client.Delete(context.Background(), oldest)
}
diff --git a/backend/src/crd/controller/viewer/reconciler/reconciler_test.go b/backend/src/crd/controller/viewer/reconciler/reconciler_test.go
index f425a2dd210..86a1b2bf5fd 100644
--- a/backend/src/crd/controller/viewer/reconciler/reconciler_test.go
+++ b/backend/src/crd/controller/viewer/reconciler/reconciler_test.go
@@ -181,7 +181,8 @@ func TestReconcile_EachViewerCreatesADeployment(t *testing.T) {
Args: []string{
"tensorboard",
"--logdir=gs://tensorboard/logdir",
- "--path_prefix=/tensorboard/viewer-123/"},
+ "--path_prefix=/tensorboard/viewer-123/",
+ "--bind_all"},
Ports: []corev1.ContainerPort{{ContainerPort: 6006}},
}}}}}}}
@@ -279,7 +280,8 @@ func TestReconcile_ViewerUsesSpecifiedVolumeMountsForDeployment(t *testing.T) {
Args: []string{
"tensorboard",
"--logdir=gs://tensorboard/logdir",
- "--path_prefix=/tensorboard/viewer-123/"},
+ "--path_prefix=/tensorboard/viewer-123/",
+ "--bind_all"},
Ports: []corev1.ContainerPort{{ContainerPort: 6006}},
VolumeMounts: []v1.VolumeMount{
{Name: "/volume-mount-name", MountPath: "/mount/path"},
diff --git a/components/OWNERS b/components/OWNERS
index d4161ec38bb..3eb413c808e 100644
--- a/components/OWNERS
+++ b/components/OWNERS
@@ -2,8 +2,10 @@ approvers:
- Ark-kun
- gaoning777
- hongye-sun
+ - numerology
reviewers:
- Ark-kun
- gaoning777
- hongye-sun
+ - numerology
- animeshsingh
diff --git a/components/filesystem/get_file/component.yaml b/components/filesystem/get_file/component.yaml
new file mode 100644
index 00000000000..f524751fc48
--- /dev/null
+++ b/components/filesystem/get_file/component.yaml
@@ -0,0 +1,20 @@
+name: Get file
+description: Get file from directory.
+inputs:
+- {name: Directory, type: Directory}
+- {name: Subpath, type: String}
+outputs:
+- {name: File}
+implementation:
+ container:
+ image: alpine
+ command:
+ - sh
+ - -ex
+ - -c
+ - |
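+ # Positional args: $0 = input Directory path, $1 = Subpath, $2 = output File path (wired from the inputPath/inputValue/outputPath entries below).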
+ mkdir -p "$(dirname "$2")"
+ cp -r "$0/$1" "$2"
+ - inputPath: Directory
+ - inputValue: Subpath
+ - outputPath: File
diff --git a/components/filesystem/get_subdirectory/component.yaml b/components/filesystem/get_subdirectory/component.yaml
new file mode 100644
index 00000000000..3f5fb34b862
--- /dev/null
+++ b/components/filesystem/get_subdirectory/component.yaml
@@ -0,0 +1,20 @@
+name: Get subdirectory
+description: Get subdirectory from directory.
+inputs:
+- {name: Directory, type: Directory}
+- {name: Subpath, type: String}
+outputs:
+- {name: Subdir, type: Directory}
+implementation:
+ container:
+ image: alpine
+ command:
+ - sh
+ - -ex
+ - -c
+ - |
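+ # Positional args: $0 = input Directory path, $1 = Subpath, $2 = output Subdir path.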
+ mkdir -p "$(dirname "$2")"
+ cp -r "$0/$1" "$2"
+ - inputPath: Directory
+ - inputValue: Subpath
+ - outputPath: Subdir
diff --git a/components/filesystem/list_items/component.yaml b/components/filesystem/list_items/component.yaml
new file mode 100644
index 00000000000..7bb41b37a13
--- /dev/null
+++ b/components/filesystem/list_items/component.yaml
@@ -0,0 +1,19 @@
+name: List items
+description: Recursively list directory contents.
+inputs:
+- {name: Directory, type: Directory}
+outputs:
+- {name: Items}
+implementation:
+ container:
+ image: alpine
+ command:
+ - sh
+ - -ex
+ - -c
+ - |
+ mkdir -p "$(dirname "$1")"
+ #ls --almost-all --recursive "$0" > "$1"
+ ls -A -R "$0" > "$1"
+ - inputPath: Directory
+ - outputPath: Items
diff --git a/components/gcp/bigquery/query/README.md b/components/gcp/bigquery/query/README.md
index feae6a2f5aa..e59cf96f4e1 100644
--- a/components/gcp/bigquery/query/README.md
+++ b/components/gcp/bigquery/query/README.md
@@ -52,11 +52,7 @@ output_gcs_path | The path to the Cloud Storage bucket containing the query outp
To use the component, the following requirements must be met:
* The BigQuery API is enabled.
-* The component is running under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow Pipeline cluster. For example:
-
- ```
- bigquery_query_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* The Kubeflow user service account is a member of the `roles/bigquery.admin` role of the project.
* The Kubeflow user service account is a member of the `roles/storage.objectCreator `role of the Cloud Storage output bucket.
@@ -89,7 +85,7 @@ KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar
import kfp.components as comp
bigquery_query_op = comp.load_component_from_url(
- 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/bigquery/query/component.yaml')
+ 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/bigquery/query/component.yaml')
help(bigquery_query_op)
```
@@ -125,7 +121,6 @@ OUTPUT_PATH = '{}/bigquery/query/questions.csv'.format(GCS_WORKING_DIR)
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Bigquery query pipeline',
@@ -147,7 +142,7 @@ def pipeline(
table_id=table_id,
output_gcs_path=output_gcs_path,
dataset_location=dataset_location,
- job_config=job_config).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ job_config=job_config)
```
#### Compile the pipeline
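
The removal of `.apply(gcp.use_gcp_secret('user-gcp-sa'))` above reflects the move to cluster-level authentication (for example Workload Identity, per the linked guide), so the sample pipelines no longer attach the secret per step. On clusters that still export the `user-gcp-sa` secret, the old per-step opt-in remains available; a short sketch, assuming `bigquery_query_op` was loaded as shown earlier and with placeholder values:

```python
import kfp.dsl as dsl
import kfp.gcp as gcp

PROJECT_ID = 'my-project'                  # placeholder
QUERY = 'SELECT 1'                         # placeholder
OUTPUT_PATH = 'gs://my-bucket/out.csv'     # placeholder

@dsl.pipeline(name='Bigquery query pipeline (secret-based auth)')
def pipeline_with_secret():
    # Only needed on clusters that still authenticate via the 'user-gcp-sa'
    # secret; with Workload Identity the .apply(...) call is unnecessary.
    bigquery_query_op(
        query=QUERY,
        project_id=PROJECT_ID,
        output_gcs_path=OUTPUT_PATH,
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))
```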
diff --git a/components/gcp/bigquery/query/component.yaml b/components/gcp/bigquery/query/component.yaml
index 3dc4e2386c2..f30ed69eea3 100644
--- a/components/gcp/bigquery/query/component.yaml
+++ b/components/gcp/bigquery/query/component.yaml
@@ -59,8 +59,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.bigquery, query,
--query, {inputValue: query},
--project_id, {inputValue: project_id},
@@ -68,10 +69,9 @@ implementation:
--table_id, {inputValue: table_id},
--dataset_location, {inputValue: dataset_location},
--output_gcs_path, {inputValue: output_gcs_path},
- --job_config, {inputValue: job_config}
+ --job_config, {inputValue: job_config},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
output_gcs_path: /tmp/kfp/output/bigquery/query-output-path.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/bigquery/query/sample.ipynb b/components/gcp/bigquery/query/sample.ipynb
index be750473926..b0bff35a79f 100644
--- a/components/gcp/bigquery/query/sample.ipynb
+++ b/components/gcp/bigquery/query/sample.ipynb
@@ -57,11 +57,7 @@
"To use the component, the following requirements must be met:\n",
"\n",
"* The BigQuery API is enabled.\n",
- "* The component is running under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow Pipeline cluster. For example:\n",
- "\n",
- " ```\n",
- " bigquery_query_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
+ "* The component can authenticate to use GCP APIs. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* The Kubeflow user service account is a member of the `roles/bigquery.admin` role of the project.\n",
"* The Kubeflow user service account is a member of the `roles/storage.objectCreator `role of the Cloud Storage output bucket.\n",
"\n",
@@ -108,7 +104,7 @@
"import kfp.components as comp\n",
"\n",
"bigquery_query_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/bigquery/query/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/bigquery/query/component.yaml')\n",
"help(bigquery_query_op)"
]
},
@@ -179,7 +175,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Bigquery query pipeline',\n",
@@ -201,7 +196,7 @@
" table_id=table_id, \n",
" output_gcs_path=output_gcs_path, \n",
" dataset_location=dataset_location, \n",
- " job_config=job_config).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " job_config=job_config)"
]
},
{
@@ -301,4 +296,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/container/component_sdk/python/kfp_component/launcher/__main__.py b/components/gcp/container/component_sdk/python/kfp_component/launcher/__main__.py
index 7d9937935c9..04f7d4b8cd2 100644
--- a/components/gcp/container/component_sdk/python/kfp_component/launcher/__main__.py
+++ b/components/gcp/container/component_sdk/python/kfp_component/launcher/__main__.py
@@ -15,6 +15,7 @@
import argparse
import fire
import importlib
+import os
import sys
import logging
from .launcher import launch
@@ -26,8 +27,18 @@ def main():
description='Launch a python module or file.')
parser.add_argument('file_or_module', type=str,
help='Either a python file path or a module name.')
+ parser.add_argument(
+ '--ui_metadata_path',
+ type=str,
+ default='/mlpipeline-ui-metadata.json',
+ help='Path for the file where the mlpipeline-ui-metadata.json data '
+ 'should be written.')
parser.add_argument('args', nargs=argparse.REMAINDER)
args = parser.parse_args()
+
+ if args.ui_metadata_path:
+ os.environ['KFP_UI_METADATA_PATH'] = args.ui_metadata_path
+
launch(args.file_or_module, args.args)
if __name__ == '__main__':
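
The new `--ui_metadata_path` launcher flag pairs with the `--ui_metadata_path, {outputPath: MLPipeline UI metadata}` argument added to the component specs in this change: the pipeline backend now supplies the output location, replacing the hardcoded `/mlpipeline-ui-metadata.json` fileOutput. The launcher only exports the path via `KFP_UI_METADATA_PATH`; a minimal sketch of how component code could honor it (the helper name and fallback behavior here are illustrative, not the actual kfp_component API):

```python
import json
import os

def write_ui_metadata(metadata: dict) -> None:
    """Illustrative helper: write KFP UI metadata where the launcher asked for it.

    The launcher above exports KFP_UI_METADATA_PATH; fall back to the legacy
    hardcoded location when the variable is absent.
    """
    path = os.environ.get('KFP_UI_METADATA_PATH', '/mlpipeline-ui-metadata.json')
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, 'w') as f:
        json.dump(metadata, f)

# Example: a table artifact entry, using the standard UI metadata schema.
# write_ui_metadata({'outputs': [{'type': 'table', 'storage': 'gcs',
#                                 'format': 'csv', 'source': 'gs://bucket/out.csv'}]})
```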
diff --git a/components/gcp/container/component_sdk/python/setup.py b/components/gcp/container/component_sdk/python/setup.py
index d67684244de..2cfaa8dd806 100644
--- a/components/gcp/container/component_sdk/python/setup.py
+++ b/components/gcp/container/component_sdk/python/setup.py
@@ -15,7 +15,7 @@
from setuptools import setup
PACKAGE_NAME = 'kfp-component'
-VERSION = '0.1.36'
+VERSION = '0.1.40'
setup(
name=PACKAGE_NAME,
diff --git a/components/gcp/dataflow/launch_python/README.md b/components/gcp/dataflow/launch_python/README.md
index eef434b3363..bbd082f2684 100644
--- a/components/gcp/dataflow/launch_python/README.md
+++ b/components/gcp/dataflow/launch_python/README.md
@@ -63,14 +63,11 @@ job_id | The ID of the Cloud Dataflow job that is created.
## Cautions & requirements
To use the components, the following requirements must be met:
- Cloud Dataflow API is enabled.
-- The component is running under a secret Kubeflow user service account in a Kubeflow Pipelines cluster. For example:
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
-The Kubeflow user service account is a member of:
-- `roles/dataflow.developer` role of the project.
-- `roles/storage.objectViewer` role of the Cloud Storage Objects `python_file_path` and `requirements_file_path`.
-- `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir`.
+- The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
+- The Kubeflow user service account is a member of:
+ - `roles/dataflow.developer` role of the project.
+ - `roles/storage.objectViewer` role of the Cloud Storage Objects `python_file_path` and `requirements_file_path`.
+ - `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir`.
## Detailed description
The component does several things during the execution:
@@ -94,7 +91,7 @@ The steps to use the component in a pipeline are:
```python
import kfp.components as comp
- dataflow_python_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataflow/launch_python/component.yaml')
+ dataflow_python_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataflow/launch_python/component.yaml')
help(dataflow_python_op)
```
@@ -221,7 +218,6 @@ OUTPUT_FILE = '{}/wc/wordcount.out'.format(GCS_STAGING_DIR)
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataflow launch python pipeline',
@@ -243,7 +239,7 @@ def pipeline(
staging_dir = staging_dir,
requirements_file_path = requirements_file_path,
args = args,
- wait_interval = wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval = wait_interval)
```
#### Compile the pipeline
diff --git a/components/gcp/dataflow/launch_python/component.yaml b/components/gcp/dataflow/launch_python/component.yaml
index cc86a09a413..bef5fe6bbf9 100644
--- a/components/gcp/dataflow/launch_python/component.yaml
+++ b/components/gcp/dataflow/launch_python/component.yaml
@@ -53,18 +53,18 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataflow, launch_python,
--python_file_path, {inputValue: python_file_path},
--project_id, {inputValue: project_id},
--staging_dir, {inputValue: staging_dir},
--requirements_file_path, {inputValue: requirements_file_path},
--args, {inputValue: args},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataflow/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataflow/launch_python/sample.ipynb b/components/gcp/dataflow/launch_python/sample.ipynb
index 5feb75f3670..6d0ca6da52d 100644
--- a/components/gcp/dataflow/launch_python/sample.ipynb
+++ b/components/gcp/dataflow/launch_python/sample.ipynb
@@ -47,14 +47,11 @@
"## Cautions & requirements\n",
"To use the components, the following requirements must be met:\n",
"- Cloud Dataflow API is enabled.\n",
- "- The component is running under a secret Kubeflow user service account in a Kubeflow Pipeline cluster. For example:\n",
- "```\n",
- "component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- "```\n",
- "The Kubeflow user service account is a member of:\n",
- "- `roles/dataflow.developer` role of the project.\n",
- "- `roles/storage.objectViewer` role of the Cloud Storage Objects `python_file_path` and `requirements_file_path`.\n",
- "- `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir`. \n",
+ "- The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
+ "- The Kubeflow user service account is a member of:\n",
+ " - `roles/dataflow.developer` role of the project.\n",
+ " - `roles/storage.objectViewer` role of the Cloud Storage Objects `python_file_path` and `requirements_file_path`.\n",
+ " - `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir`. \n",
"\n",
"## Detailed description\n",
"The component does several things during the execution:\n",
@@ -95,7 +92,7 @@
"import kfp.components as comp\n",
"\n",
"dataflow_python_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataflow/launch_python/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataflow/launch_python/component.yaml')\n",
"help(dataflow_python_op)"
]
},
@@ -295,7 +292,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataflow launch python pipeline',\n",
@@ -317,7 +313,7 @@
" staging_dir = staging_dir, \n",
" requirements_file_path = requirements_file_path, \n",
" args = args,\n",
- " wait_interval = wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " wait_interval = wait_interval)"
]
},
{
@@ -417,4 +413,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/dataflow/launch_template/README.md b/components/gcp/dataflow/launch_template/README.md
index aa86cb07973..cbfc82f153a 100644
--- a/components/gcp/dataflow/launch_template/README.md
+++ b/components/gcp/dataflow/launch_template/README.md
@@ -37,11 +37,8 @@ job_id | The id of the Cloud Dataflow job that is created.
To use the component, the following requirements must be met:
- Cloud Dataflow API is enabled.
-- The component is running under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow Pipeline cluster. For example:
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
-* The Kubeflow user service account is a member of:
+- The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
+- The Kubeflow user service account is a member of:
- `roles/dataflow.developer` role of the project.
- `roles/storage.objectViewer` role of the Cloud Storage Object `gcs_path.`
- `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir.`
@@ -67,7 +64,7 @@ KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar
import kfp.components as comp
dataflow_template_op = comp.load_component_from_url(
- 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataflow/launch_template/component.yaml')
+ 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataflow/launch_template/component.yaml')
help(dataflow_template_op)
```
@@ -102,7 +99,6 @@ OUTPUT_PATH = '{}/out/wc'.format(GCS_WORKING_DIR)
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataflow launch template pipeline',
@@ -128,7 +124,7 @@ def pipeline(
location = location,
validate_only = validate_only,
staging_dir = staging_dir,
- wait_interval = wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+    wait_interval = wait_interval)
```
#### Compile the pipeline
diff --git a/components/gcp/dataflow/launch_template/component.yaml b/components/gcp/dataflow/launch_template/component.yaml
index 6da76449fa8..0413368f38f 100644
--- a/components/gcp/dataflow/launch_template/component.yaml
+++ b/components/gcp/dataflow/launch_template/component.yaml
@@ -63,8 +63,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataflow, launch_template,
--project_id, {inputValue: project_id},
--gcs_path, {inputValue: gcs_path},
@@ -78,4 +79,3 @@ implementation:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataflow/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataflow/launch_template/sample.ipynb b/components/gcp/dataflow/launch_template/sample.ipynb
index 4e72c8b1e47..489a2e1bc39 100644
--- a/components/gcp/dataflow/launch_template/sample.ipynb
+++ b/components/gcp/dataflow/launch_template/sample.ipynb
@@ -42,11 +42,8 @@
"\n",
"To use the component, the following requirements must be met:\n",
"- Cloud Dataflow API is enabled.\n",
- "- The component is running under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow Pipeline cluster. For example:\n",
- " ```\n",
- " component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
- "* The Kubeflow user service account is a member of:\n",
+ "- The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
+ "- The Kubeflow user service account is a member of:\n",
" - `roles/dataflow.developer` role of the project.\n",
" - `roles/storage.objectViewer` role of the Cloud Storage Object `gcs_path.`\n",
" - `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir.` \n",
@@ -85,7 +82,7 @@
"import kfp.components as comp\n",
"\n",
"dataflow_template_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataflow/launch_template/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataflow/launch_template/component.yaml')\n",
"help(dataflow_template_op)"
]
},
@@ -155,7 +152,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataflow launch template pipeline',\n",
@@ -181,7 +177,7 @@
" location = location, \n",
" validate_only = validate_only,\n",
" staging_dir = staging_dir,\n",
- " wait_interval = wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " wait_interval = wait_interval)"
]
},
{
@@ -282,4 +278,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/dataproc/create_cluster/README.md b/components/gcp/dataproc/create_cluster/README.md
index 128f4d9a2e5..6da9c4c7cc7 100644
--- a/components/gcp/dataproc/create_cluster/README.md
+++ b/components/gcp/dataproc/create_cluster/README.md
@@ -62,11 +62,7 @@ Note: You can recycle the cluster by using the [Dataproc delete cluster componen
To use the component, you must:
* Set up the GCP project by following these [steps](https://cloud.google.com/dataproc/docs/guides/setup-project).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the following types of access to the Kubeflow user service account:
* Read access to the Cloud Storage buckets which contain the initialization action files.
* The role, `roles/dataproc.editor`, on the project.
@@ -92,7 +88,7 @@ Follow these steps to use the component in a pipeline:
```python
import kfp.components as comp
- dataproc_create_cluster_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/create_cluster/component.yaml')
+ dataproc_create_cluster_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/create_cluster/component.yaml')
help(dataproc_create_cluster_op)
```
@@ -114,7 +110,6 @@ EXPERIMENT_NAME = 'Dataproc - Create Cluster'
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc create cluster pipeline',
@@ -140,7 +135,7 @@ def dataproc_create_cluster_pipeline(
config_bucket=config_bucket,
image_version=image_version,
cluster=cluster,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
#### Compile the pipeline
diff --git a/components/gcp/dataproc/create_cluster/component.yaml b/components/gcp/dataproc/create_cluster/component.yaml
index f232e4c155f..519af17771e 100644
--- a/components/gcp/dataproc/create_cluster/component.yaml
+++ b/components/gcp/dataproc/create_cluster/component.yaml
@@ -70,8 +70,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, create_cluster,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
@@ -81,10 +82,9 @@ implementation:
--config_bucket, {inputValue: config_bucket},
--image_version, {inputValue: image_version},
--cluster, {inputValue: cluster},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
cluster_name: /tmp/kfp/output/dataproc/cluster_name.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataproc/create_cluster/sample.ipynb b/components/gcp/dataproc/create_cluster/sample.ipynb
index 8f3cea54006..02812db67c0 100644
--- a/components/gcp/dataproc/create_cluster/sample.ipynb
+++ b/components/gcp/dataproc/create_cluster/sample.ipynb
@@ -46,11 +46,7 @@
"\n",
"To use the component, you must:\n",
"* Set up the GCP project by following these [steps](https://cloud.google.com/dataproc/docs/guides/setup-project).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```\n",
- " component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the following types of access to the Kubeflow user service account:\n",
" * Read access to the Cloud Storage buckets which contains initialization action files.\n",
" * The role, `roles/dataproc.editor` on the project.\n",
@@ -92,7 +88,7 @@
"import kfp.components as comp\n",
"\n",
"dataproc_create_cluster_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/create_cluster/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/create_cluster/component.yaml')\n",
"help(dataproc_create_cluster_op)"
]
},
@@ -137,7 +133,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataproc create cluster pipeline',\n",
@@ -163,7 +158,7 @@
" config_bucket=config_bucket, \n",
" image_version=image_version, \n",
" cluster=cluster, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " wait_interval=wait_interval)"
]
},
{
@@ -248,4 +243,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/dataproc/delete_cluster/README.md b/components/gcp/dataproc/delete_cluster/README.md
index 7dc13409e1e..5cbf479130f 100644
--- a/components/gcp/dataproc/delete_cluster/README.md
+++ b/components/gcp/dataproc/delete_cluster/README.md
@@ -43,11 +43,7 @@ ML workflow:
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role, `roles/dataproc.editor`, on the project.
## Detailed description
@@ -70,7 +66,7 @@ Follow these steps to use the component in a pipeline:
```python
import kfp.components as comp
- dataproc_delete_cluster_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/delete_cluster/component.yaml')
+ dataproc_delete_cluster_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/delete_cluster/component.yaml')
help(dataproc_delete_cluster_op)
```
@@ -98,7 +94,6 @@ EXPERIMENT_NAME = 'Dataproc - Delete Cluster'
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc delete cluster pipeline',
@@ -112,7 +107,7 @@ def dataproc_delete_cluster_pipeline(
dataproc_delete_cluster_op(
project_id=project_id,
region=region,
- name=name).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ name=name)
```
#### Compile the pipeline
diff --git a/components/gcp/dataproc/delete_cluster/component.yaml b/components/gcp/dataproc/delete_cluster/component.yaml
index 1c5cf5a8f26..1636f9914e3 100644
--- a/components/gcp/dataproc/delete_cluster/component.yaml
+++ b/components/gcp/dataproc/delete_cluster/component.yaml
@@ -36,7 +36,7 @@ inputs:
type: Integer
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
kfp_component.google.dataproc, delete_cluster,
--project_id, {inputValue: project_id},
diff --git a/components/gcp/dataproc/delete_cluster/sample.ipynb b/components/gcp/dataproc/delete_cluster/sample.ipynb
index e563a922dfa..648f02bfab7 100644
--- a/components/gcp/dataproc/delete_cluster/sample.ipynb
+++ b/components/gcp/dataproc/delete_cluster/sample.ipynb
@@ -33,11 +33,7 @@
"## Cautions & requirements\n",
"To use the component, you must:\n",
"* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```\n",
- " component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.\n",
"\n",
"## Detailed description\n",
@@ -75,7 +71,7 @@
"import kfp.components as comp\n",
"\n",
"dataproc_delete_cluster_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/delete_cluster/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/delete_cluster/component.yaml')\n",
"help(dataproc_delete_cluster_op)"
]
},
@@ -125,7 +121,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataproc delete cluster pipeline',\n",
@@ -139,7 +134,7 @@
" dataproc_delete_cluster_op(\n",
" project_id=project_id, \n",
" region=region, \n",
- " name=name).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " name=name)"
]
},
{
diff --git a/components/gcp/dataproc/submit_hadoop_job/README.md b/components/gcp/dataproc/submit_hadoop_job/README.md
index 49180798aa4..176a53ccd9c 100644
--- a/components/gcp/dataproc/submit_hadoop_job/README.md
+++ b/components/gcp/dataproc/submit_hadoop_job/README.md
@@ -60,11 +60,7 @@ job_id | The ID of the created job. | String
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```python
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role, `roles/dataproc.editor`, on the project.
## Detailed description
@@ -87,7 +83,7 @@ Follow these steps to use the component in a pipeline:
```python
import kfp.components as comp
- dataproc_submit_hadoop_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_hadoop_job/component.yaml')
+ dataproc_submit_hadoop_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_hadoop_job/component.yaml')
help(dataproc_submit_hadoop_job_op)
```
@@ -135,7 +131,6 @@ Caution: This will remove all blob files under `OUTPUT_GCS_PATH`.
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc submit Hadoop job pipeline',
@@ -164,7 +159,7 @@ def dataproc_submit_hadoop_job_pipeline(
args=args,
hadoop_job=hadoop_job,
job=job,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
#### Compile the pipeline
diff --git a/components/gcp/dataproc/submit_hadoop_job/component.yaml b/components/gcp/dataproc/submit_hadoop_job/component.yaml
index 93b7cadb5c0..6c2b7429c56 100644
--- a/components/gcp/dataproc/submit_hadoop_job/component.yaml
+++ b/components/gcp/dataproc/submit_hadoop_job/component.yaml
@@ -80,8 +80,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_hadoop_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
@@ -91,10 +92,9 @@ implementation:
--args, {inputValue: args},
--hadoop_job, {inputValue: hadoop_job},
--job, {inputValue: job},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataproc/submit_hadoop_job/sample.ipynb b/components/gcp/dataproc/submit_hadoop_job/sample.ipynb
index df158f18039..45c4be02020 100644
--- a/components/gcp/dataproc/submit_hadoop_job/sample.ipynb
+++ b/components/gcp/dataproc/submit_hadoop_job/sample.ipynb
@@ -46,11 +46,7 @@
"To use the component, you must:\n",
"* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).\n",
"* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```python\n",
- " component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.\n",
"\n",
"## Detailed description\n",
@@ -90,7 +86,7 @@
"import kfp.components as comp\n",
"\n",
"dataproc_submit_hadoop_job_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_hadoop_job/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_hadoop_job/component.yaml')\n",
"help(dataproc_submit_hadoop_job_op)"
]
},
@@ -186,7 +182,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataproc submit Hadoop job pipeline',\n",
@@ -215,7 +210,7 @@
" args=args, \n",
" hadoop_job=hadoop_job, \n",
" job=job, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " wait_interval=wait_interval)"
]
},
{
@@ -316,4 +311,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/dataproc/submit_hive_job/README.md b/components/gcp/dataproc/submit_hive_job/README.md
index 0f090213fc6..2dcfdc95e77 100644
--- a/components/gcp/dataproc/submit_hive_job/README.md
+++ b/components/gcp/dataproc/submit_hive_job/README.md
@@ -52,11 +52,7 @@ job_id | The ID of the created job. | String
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.
## Detailed description
@@ -77,7 +73,7 @@ Follow these steps to use the component in a pipeline:
```python
import kfp.components as comp
- dataproc_submit_hive_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_hive_job/component.yaml')
+ dataproc_submit_hive_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_hive_job/component.yaml')
help(dataproc_submit_hive_job_op)
```
@@ -128,7 +124,6 @@ EXPERIMENT_NAME = 'Dataproc - Submit Hive Job'
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc submit Hive job pipeline',
@@ -154,7 +149,7 @@ def dataproc_submit_hive_job_pipeline(
script_variables=script_variables,
hive_job=hive_job,
job=job,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
diff --git a/components/gcp/dataproc/submit_hive_job/component.yaml b/components/gcp/dataproc/submit_hive_job/component.yaml
index 2464d08c50f..2be60b1cdec 100644
--- a/components/gcp/dataproc/submit_hive_job/component.yaml
+++ b/components/gcp/dataproc/submit_hive_job/component.yaml
@@ -75,8 +75,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_hive_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
@@ -86,10 +87,9 @@ implementation:
--script_variables, {inputValue: script_variables},
--hive_job, {inputValue: hive_job},
--job, {inputValue: job},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataproc/submit_hive_job/sample.ipynb b/components/gcp/dataproc/submit_hive_job/sample.ipynb
index 9635f870158..7153e9a2172 100644
--- a/components/gcp/dataproc/submit_hive_job/sample.ipynb
+++ b/components/gcp/dataproc/submit_hive_job/sample.ipynb
@@ -39,11 +39,7 @@
"To use the component, you must:\n",
"* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).\n",
"* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```\n",
- " component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.\n",
"\n",
"## Detailed description\n",
@@ -81,7 +77,7 @@
"import kfp.components as comp\n",
"\n",
"dataproc_submit_hive_job_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_hive_job/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_hive_job/component.yaml')\n",
"help(dataproc_submit_hive_job_op)"
]
},
@@ -156,7 +152,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataproc submit Hive job pipeline',\n",
@@ -182,7 +177,7 @@
" script_variables=script_variables, \n",
" hive_job=hive_job, \n",
" job=job, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
+ " wait_interval=wait_interval)\n",
" "
]
},
@@ -267,4 +262,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/dataproc/submit_pig_job/README.md b/components/gcp/dataproc/submit_pig_job/README.md
index 5a9b19a392f..1a5a201038b 100644
--- a/components/gcp/dataproc/submit_pig_job/README.md
+++ b/components/gcp/dataproc/submit_pig_job/README.md
@@ -58,11 +58,7 @@ job_id | The ID of the created job. | String
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role, `roles/dataproc.editor`, on the project.
## Detailed description
@@ -86,7 +82,7 @@ Follow these steps to use the component in a pipeline:
```python
import kfp.components as comp
- dataproc_submit_pig_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_pig_job/component.yaml')
+ dataproc_submit_pig_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_pig_job/component.yaml')
help(dataproc_submit_pig_job_op)
```
@@ -124,7 +120,6 @@ EXPERIMENT_NAME = 'Dataproc - Submit Pig Job'
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc submit Pig job pipeline',
@@ -150,7 +145,7 @@ def dataproc_submit_pig_job_pipeline(
script_variables=script_variables,
pig_job=pig_job,
job=job,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
diff --git a/components/gcp/dataproc/submit_pig_job/component.yaml b/components/gcp/dataproc/submit_pig_job/component.yaml
index cb2ffe8dff3..da4fe13acdd 100644
--- a/components/gcp/dataproc/submit_pig_job/component.yaml
+++ b/components/gcp/dataproc/submit_pig_job/component.yaml
@@ -75,8 +75,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_pig_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
@@ -86,10 +87,9 @@ implementation:
--script_variables, {inputValue: script_variables},
--pig_job, {inputValue: pig_job},
--job, {inputValue: job},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataproc/submit_pig_job/sample.ipynb b/components/gcp/dataproc/submit_pig_job/sample.ipynb
index 35789e40a4b..babf11099f7 100644
--- a/components/gcp/dataproc/submit_pig_job/sample.ipynb
+++ b/components/gcp/dataproc/submit_pig_job/sample.ipynb
@@ -42,11 +42,7 @@
"To use the component, you must:\n",
"* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).\n",
"* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```\n",
- " component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.\n",
"\n",
"## Detailed description\n",
@@ -84,7 +80,7 @@
"import kfp.components as comp\n",
"\n",
"dataproc_submit_pig_job_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_pig_job/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_pig_job/component.yaml')\n",
"help(dataproc_submit_pig_job_op)"
]
},
@@ -146,7 +142,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataproc submit Pig job pipeline',\n",
@@ -172,7 +167,7 @@
" script_variables=script_variables, \n",
" pig_job=pig_job, \n",
" job=job, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
+ " wait_interval=wait_interval)\n",
" "
]
},
@@ -257,4 +252,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/dataproc/submit_pyspark_job/README.md b/components/gcp/dataproc/submit_pyspark_job/README.md
index a6443d68523..43bac695cd3 100644
--- a/components/gcp/dataproc/submit_pyspark_job/README.md
+++ b/components/gcp/dataproc/submit_pyspark_job/README.md
@@ -54,11 +54,7 @@ job_id | The ID of the created job. | String
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.
## Detailed description
@@ -83,7 +79,7 @@ Follow these steps to use the component in a pipeline:
```python
import kfp.components as comp
- dataproc_submit_pyspark_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_pyspark_job/component.yaml')
+ dataproc_submit_pyspark_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_pyspark_job/component.yaml')
help(dataproc_submit_pyspark_job_op)
```
@@ -120,7 +116,6 @@ EXPERIMENT_NAME = 'Dataproc - Submit PySpark Job'
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc submit PySpark job pipeline',
@@ -144,7 +139,7 @@ def dataproc_submit_pyspark_job_pipeline(
args=args,
pyspark_job=pyspark_job,
job=job,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
diff --git a/components/gcp/dataproc/submit_pyspark_job/component.yaml b/components/gcp/dataproc/submit_pyspark_job/component.yaml
index 4f256b002b7..3337f932cc3 100644
--- a/components/gcp/dataproc/submit_pyspark_job/component.yaml
+++ b/components/gcp/dataproc/submit_pyspark_job/component.yaml
@@ -69,8 +69,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_pyspark_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
@@ -79,10 +80,9 @@ implementation:
--args, {inputValue: args},
--pyspark_job, {inputValue: pyspark_job},
--job, {inputValue: job},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataproc/submit_pyspark_job/sample.ipynb b/components/gcp/dataproc/submit_pyspark_job/sample.ipynb
index b163b378646..dfe3d439449 100644
--- a/components/gcp/dataproc/submit_pyspark_job/sample.ipynb
+++ b/components/gcp/dataproc/submit_pyspark_job/sample.ipynb
@@ -42,11 +42,7 @@
"To use the component, you must:\n",
"* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).\n",
"* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```\n",
- " component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.\n",
"\n",
"## Detailed description\n",
@@ -86,7 +82,7 @@
"import kfp.components as comp\n",
"\n",
"dataproc_submit_pyspark_job_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_pyspark_job/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_pyspark_job/component.yaml')\n",
"help(dataproc_submit_pyspark_job_op)"
]
},
@@ -157,7 +153,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataproc submit PySpark job pipeline',\n",
@@ -181,7 +176,7 @@
" args=args, \n",
" pyspark_job=pyspark_job, \n",
" job=job, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
+ " wait_interval=wait_interval)\n",
" "
]
},
@@ -266,4 +261,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/dataproc/submit_spark_job/README.md b/components/gcp/dataproc/submit_spark_job/README.md
index 8412a712e10..b7d96958a35 100644
--- a/components/gcp/dataproc/submit_spark_job/README.md
+++ b/components/gcp/dataproc/submit_spark_job/README.md
@@ -66,13 +66,7 @@ To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts), in a Kubeflow cluster. For example:
-
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
-
-
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.
@@ -101,7 +95,7 @@ Follow these steps to use the component in a pipeline:
import kfp.components as comp
dataproc_submit_spark_job_op = comp.load_component_from_url(
- 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_spark_job/component.yaml')
+ 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_spark_job/component.yaml')
help(dataproc_submit_spark_job_op)
```
@@ -139,7 +133,6 @@ EXPERIMENT_NAME = 'Dataproc - Submit Spark Job'
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc submit Spark job pipeline',
@@ -165,7 +158,7 @@ def dataproc_submit_spark_job_pipeline(
args=args,
spark_job=spark_job,
job=job,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
diff --git a/components/gcp/dataproc/submit_spark_job/component.yaml b/components/gcp/dataproc/submit_spark_job/component.yaml
index f915b7bb2ef..c881029da6f 100644
--- a/components/gcp/dataproc/submit_spark_job/component.yaml
+++ b/components/gcp/dataproc/submit_spark_job/component.yaml
@@ -76,8 +76,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_spark_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
@@ -87,10 +88,9 @@ implementation:
--args, {inputValue: args},
--spark_job, {inputValue: spark_job},
--job, {inputValue: job},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataproc/submit_spark_job/sample.ipynb b/components/gcp/dataproc/submit_spark_job/sample.ipynb
index acc05f3d718..d566f309fc3 100644
--- a/components/gcp/dataproc/submit_spark_job/sample.ipynb
+++ b/components/gcp/dataproc/submit_spark_job/sample.ipynb
@@ -50,13 +50,7 @@
"\n",
"* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).\n",
"* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```\n",
- " component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
- "\n",
- "\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.\n",
"\n",
"\n",
@@ -99,7 +93,7 @@
"import kfp.components as comp\n",
"\n",
"dataproc_submit_spark_job_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_spark_job/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_spark_job/component.yaml')\n",
"help(dataproc_submit_spark_job_op)"
]
},
@@ -159,7 +153,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataproc submit Spark job pipeline',\n",
@@ -185,7 +178,7 @@
" args=args, \n",
" spark_job=spark_job, \n",
" job=job, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
+ " wait_interval=wait_interval)\n",
" "
]
},
@@ -271,4 +264,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/dataproc/submit_sparksql_job/README.md b/components/gcp/dataproc/submit_sparksql_job/README.md
index bd07f1e7d62..8beb1a5fda1 100644
--- a/components/gcp/dataproc/submit_sparksql_job/README.md
+++ b/components/gcp/dataproc/submit_sparksql_job/README.md
@@ -53,10 +53,7 @@ job_id | The ID of the created job. | String
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
- ```
- component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
+* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role, `roles/dataproc.editor`, on the project.
## Detailed Description
@@ -77,7 +74,7 @@ Follow these steps to use the component in a pipeline:
```python
import kfp.components as comp
- dataproc_submit_sparksql_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_sparksql_job/component.yaml')
+ dataproc_submit_sparksql_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_sparksql_job/component.yaml')
help(dataproc_submit_sparksql_job_op)
```
@@ -124,7 +121,6 @@ EXPERIMENT_NAME = 'Dataproc - Submit SparkSQL Job'
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc submit SparkSQL job pipeline',
@@ -150,7 +146,7 @@ def dataproc_submit_sparksql_job_pipeline(
script_variables=script_variables,
sparksql_job=sparksql_job,
job=job,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
diff --git a/components/gcp/dataproc/submit_sparksql_job/component.yaml b/components/gcp/dataproc/submit_sparksql_job/component.yaml
index 89dd2fe948c..275f0f9a67f 100644
--- a/components/gcp/dataproc/submit_sparksql_job/component.yaml
+++ b/components/gcp/dataproc/submit_sparksql_job/component.yaml
@@ -75,8 +75,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_sparksql_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
@@ -86,10 +87,9 @@ implementation:
--script_variables, {inputValue: script_variables},
--sparksql_job, {inputValue: sparksql_job},
--job, {inputValue: job},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/dataproc/submit_sparksql_job/sample.ipynb b/components/gcp/dataproc/submit_sparksql_job/sample.ipynb
index aecf80b61e4..5e8069d7d5c 100644
--- a/components/gcp/dataproc/submit_sparksql_job/sample.ipynb
+++ b/components/gcp/dataproc/submit_sparksql_job/sample.ipynb
@@ -40,10 +40,7 @@
"To use the component, you must:\n",
"* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).\n",
"* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "```\n",
- "component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- "```\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.\n",
"\n",
"## Detailed Description\n",
@@ -81,7 +78,7 @@
"import kfp.components as comp\n",
"\n",
"dataproc_submit_sparksql_job_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/dataproc/submit_sparksql_job/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/dataproc/submit_sparksql_job/component.yaml')\n",
"help(dataproc_submit_sparksql_job_op)"
]
},
@@ -152,7 +149,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='Dataproc submit SparkSQL job pipeline',\n",
@@ -178,7 +174,7 @@
" script_variables=script_variables, \n",
" sparksql_job=sparksql_job, \n",
" job=job, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
+ " wait_interval=wait_interval)\n",
" "
]
},
@@ -263,4 +259,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/ml_engine/batch_predict/README.md b/components/gcp/ml_engine/batch_predict/README.md
index cc7933e89bf..bc0bae52264 100644
--- a/components/gcp/ml_engine/batch_predict/README.md
+++ b/components/gcp/ml_engine/batch_predict/README.md
@@ -57,13 +57,7 @@ output_path | The output path of the batch prediction job | GCSPath
To use the component, you must:
* Set up a cloud environment by following this [guide](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```python
- mlengine_predict_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
-
-
+* Ensure the component can authenticate to GCP; refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the following types of access to the Kubeflow user service account:
* Read access to the Cloud Storage buckets which contains the input data.
* Write access to the Cloud Storage bucket of the output directory.
@@ -94,7 +88,7 @@ KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar
import kfp.components as comp
mlengine_batch_predict_op = comp.load_component_from_url(
- 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/ml_engine/batch_predict/component.yaml')
+ 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/ml_engine/batch_predict/component.yaml')
help(mlengine_batch_predict_op)
```
@@ -132,7 +126,6 @@ OUTPUT_GCS_PATH = GCS_WORKING_DIR + '/batch_predict/output/'
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='CloudML batch predict pipeline',
@@ -161,7 +154,7 @@ def pipeline(
output_data_format=output_data_format,
prediction_input=prediction_input,
job_id_prefix=job_id_prefix,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
#### Compile the pipeline
diff --git a/components/gcp/ml_engine/batch_predict/component.yaml b/components/gcp/ml_engine/batch_predict/component.yaml
index c6aa4ce2460..834207d2a6b 100644
--- a/components/gcp/ml_engine/batch_predict/component.yaml
+++ b/components/gcp/ml_engine/batch_predict/component.yaml
@@ -69,8 +69,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.ml_engine, batch_predict,
--project_id, {inputValue: project_id},
--model_path, {inputValue: model_path},
@@ -81,10 +82,9 @@ implementation:
--output_data_format, {inputValue: output_data_format},
--prediction_input, {inputValue: prediction_input},
--job_id_prefix, {inputValue: job_id_prefix},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/ml_engine/job_id.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/ml_engine/batch_predict/sample.ipynb b/components/gcp/ml_engine/batch_predict/sample.ipynb
index f27544231dc..df5ad2baeba 100644
--- a/components/gcp/ml_engine/batch_predict/sample.ipynb
+++ b/components/gcp/ml_engine/batch_predict/sample.ipynb
@@ -62,13 +62,7 @@
"To use the component, you must:\n",
"\n",
"* Set up a cloud environment by following this [guide](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```python\n",
- " mlengine_predict_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
- "\n",
- "\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the following types of access to the Kubeflow user service account:\n",
" * Read access to the Cloud Storage buckets which contains the input data.\n",
" * Write access to the Cloud Storage bucket of the output directory.\n",
@@ -112,7 +106,7 @@
"import kfp.components as comp\n",
"\n",
"mlengine_batch_predict_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/ml_engine/batch_predict/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/ml_engine/batch_predict/component.yaml')\n",
"help(mlengine_batch_predict_op)"
]
},
@@ -185,7 +179,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='CloudML batch predict pipeline',\n",
@@ -214,7 +207,7 @@
" output_data_format=output_data_format, \n",
" prediction_input=prediction_input, \n",
" job_id_prefix=job_id_prefix,\n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " wait_interval=wait_interval)"
]
},
{
@@ -315,4 +308,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/ml_engine/deploy/README.md b/components/gcp/ml_engine/deploy/README.md
index dbdbaeade31..1d1252a4b4a 100644
--- a/components/gcp/ml_engine/deploy/README.md
+++ b/components/gcp/ml_engine/deploy/README.md
@@ -70,14 +70,7 @@ The accepted file formats are:
To use the component, you must:
* [Set up the cloud environment](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```
- ```python
- mlengine_deploy_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
-
- ```
-
+* Ensure the component can authenticate to GCP; refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant read access to the Cloud Storage bucket that contains the trained model to the Kubeflow user service account.
## Detailed description
@@ -110,7 +103,7 @@ KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar
import kfp.components as comp
mlengine_deploy_op = comp.load_component_from_url(
- 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/ml_engine/deploy/component.yaml')
+ 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/ml_engine/deploy/component.yaml')
help(mlengine_deploy_op)
```
@@ -136,7 +129,6 @@ TRAINED_MODEL_PATH = 'gs://ml-pipeline-playground/samples/ml_engine/census/train
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='CloudML deploy pipeline',
@@ -163,7 +155,7 @@ def pipeline(
version=version,
replace_existing_version=replace_existing_version,
set_default=set_default,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
#### Compile the pipeline
diff --git a/components/gcp/ml_engine/deploy/component.yaml b/components/gcp/ml_engine/deploy/component.yaml
index 13b981fed53..cc909041922 100644
--- a/components/gcp/ml_engine/deploy/component.yaml
+++ b/components/gcp/ml_engine/deploy/component.yaml
@@ -95,8 +95,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.ml_engine, deploy,
--model_uri, {inputValue: model_uri},
--project_id, {inputValue: project_id},
@@ -116,4 +117,3 @@ implementation:
model_uri: /tmp/kfp/output/ml_engine/model_uri.txt
model_name: /tmp/kfp/output/ml_engine/model_name.txt
version_name: /tmp/kfp/output/ml_engine/version_name.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/ml_engine/deploy/sample.ipynb b/components/gcp/ml_engine/deploy/sample.ipynb
index 94a09d981a5..9911f300688 100644
--- a/components/gcp/ml_engine/deploy/sample.ipynb
+++ b/components/gcp/ml_engine/deploy/sample.ipynb
@@ -75,14 +75,7 @@
"To use the component, you must:\n",
"\n",
"* [Set up the cloud environment](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```\n",
- " ```python\n",
- " mlengine_deploy_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- "\n",
- " ```\n",
- "\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant read access to the Cloud Storage bucket that contains the trained model to the Kubeflow user service account.\n",
"\n",
"## Detailed description\n",
@@ -128,7 +121,7 @@
"import kfp.components as comp\n",
"\n",
"mlengine_deploy_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/ml_engine/deploy/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/ml_engine/deploy/component.yaml')\n",
"help(mlengine_deploy_op)"
]
},
@@ -176,7 +169,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='CloudML deploy pipeline',\n",
@@ -203,7 +195,7 @@
" version=version, \n",
" replace_existing_version=replace_existing_version, \n",
" set_default=set_default, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " wait_interval=wait_interval)"
]
},
{
@@ -288,4 +280,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/gcp/ml_engine/train/README.md b/components/gcp/ml_engine/train/README.md
index a41de009edc..84ff3d030a4 100644
--- a/components/gcp/ml_engine/train/README.md
+++ b/components/gcp/ml_engine/train/README.md
@@ -73,12 +73,7 @@ The component accepts two types of inputs:
To use the component, you must:
* Set up a cloud environment by following this [guide](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).
-* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
-
- ```
- mlengine_train_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
- ```
-
+* Ensure the component can authenticate to GCP; refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the following access to the Kubeflow user service account:
* Read access to the Cloud Storage buckets which contain the input data, packages, or Docker images.
* Write access to the Cloud Storage bucket of the output directory.
@@ -104,7 +99,7 @@ The steps to use the component in a pipeline are:
```python
import kfp.components as comp
- mlengine_train_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/ml_engine/train/component.yaml')
+ mlengine_train_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/ml_engine/train/component.yaml')
help(mlengine_train_op)
```
### Sample
@@ -160,7 +155,6 @@ rm -fr ./cloudml-samples-master/ ./master.zip ./dist
```python
import kfp.dsl as dsl
-import kfp.gcp as gcp
import json
@dsl.pipeline(
name='CloudML training pipeline',
@@ -199,7 +193,7 @@ def pipeline(
worker_image_uri=worker_image_uri,
training_input=training_input,
job_id_prefix=job_id_prefix,
- wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
+ wait_interval=wait_interval)
```
#### Compile the pipeline
diff --git a/components/gcp/ml_engine/train/component.yaml b/components/gcp/ml_engine/train/component.yaml
index a48f1730a22..37d48918360 100644
--- a/components/gcp/ml_engine/train/component.yaml
+++ b/components/gcp/ml_engine/train/component.yaml
@@ -103,8 +103,9 @@ outputs:
type: UI metadata
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-gcp:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-gcp:bae654dc5cf407359ac5f822d03833768739c4c1
args: [
+ --ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.ml_engine, train,
--project_id, {inputValue: project_id},
--python_module, {inputValue: python_module},
@@ -118,11 +119,10 @@ implementation:
--worker_image_uri, {inputValue: worker_image_uri},
--training_input, {inputValue: training_input},
--job_id_prefix, {inputValue: job_id_prefix},
- --wait_interval, {inputValue: wait_interval}
+ --wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/ml_engine/job_id.txt
job_dir: /tmp/kfp/output/ml_engine/job_dir.txt
- MLPipeline UI metadata: /mlpipeline-ui-metadata.json
diff --git a/components/gcp/ml_engine/train/sample.ipynb b/components/gcp/ml_engine/train/sample.ipynb
index 9d1a8df9441..5f60e78425c 100644
--- a/components/gcp/ml_engine/train/sample.ipynb
+++ b/components/gcp/ml_engine/train/sample.ipynb
@@ -56,12 +56,7 @@
"To use the component, you must:\n",
"\n",
"* Set up a cloud environment by following this [guide](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).\n",
- "* Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:\n",
- "\n",
- " ```\n",
- " mlengine_train_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
- " ```\n",
- "\n",
+ "* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n",
"* Grant the following access to the Kubeflow user service account: \n",
" * Read access to the Cloud Storage buckets which contain the input data, packages, or Docker images.\n",
" * Write access to the Cloud Storage bucket of the output directory.\n",
@@ -104,7 +99,7 @@
"import kfp.components as comp\n",
"\n",
"mlengine_train_op = comp.load_component_from_url(\n",
- " 'https://raw.githubusercontent.com/kubeflow/pipelines/4e7e6e866c1256e641b0c3effc55438e6e4b30f6/components/gcp/ml_engine/train/component.yaml')\n",
+ " 'https://raw.githubusercontent.com/kubeflow/pipelines/ff116b6f1a0f0cdaafb64fcd04214c169045e6fc/components/gcp/ml_engine/train/component.yaml')\n",
"help(mlengine_train_op)"
]
},
@@ -219,7 +214,6 @@
"outputs": [],
"source": [
"import kfp.dsl as dsl\n",
- "import kfp.gcp as gcp\n",
"import json\n",
"@dsl.pipeline(\n",
" name='CloudML training pipeline',\n",
@@ -258,7 +252,7 @@
" worker_image_uri=worker_image_uri, \n",
" training_input=training_input, \n",
" job_id_prefix=job_id_prefix, \n",
- " wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))"
+ " wait_interval=wait_interval)"
]
},
{
@@ -360,4 +354,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/components/git/clone/component.yaml b/components/git/clone/component.yaml
new file mode 100644
index 00000000000..0e18ed64e6c
--- /dev/null
+++ b/components/git/clone/component.yaml
@@ -0,0 +1,18 @@
+name: Git clone
+description: Creates a shallow clone of the specified repo branch
+inputs:
+- {name: Repo URI, type: URI}
+- {name: Branch, type: String, default: master}
+outputs:
+- {name: Repo dir, type: Directory}
+implementation:
+ container:
+ image: alpine/git
+ command:
+ - git
+ - clone
+ - --depth=1
+ - --branch
+ - inputValue: Branch
+ - inputValue: Repo URI
+ - outputPath: Repo dir
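The new Git clone component above only declares an interface, so a minimal usage sketch may help; the pipeline name, repository URL, and local component path below are illustrative assumptions, not part of this change:

```python
import kfp.dsl as dsl
import kfp.components as comp

# Hypothetical local path to the component definition added above.
git_clone_op = comp.load_component_from_file('components/git/clone/component.yaml')

@dsl.pipeline(name='Git clone example', description='Illustrative shallow-clone pipeline.')
def git_clone_pipeline(repo_uri='https://github.com/kubeflow/pipelines.git', branch='master'):
    # The input names 'Repo URI' and 'Branch' become repo_uri/branch keyword arguments.
    clone_task = git_clone_op(repo_uri=repo_uri, branch=branch)
    # Downstream tasks can consume the cloned repository via the 'Repo dir' output.

if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(git_clone_pipeline, __file__ + '.tar.gz')
```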
diff --git a/components/kubeflow/katib-launcher/src/launch_crd.py b/components/kubeflow/common/launch_crd.py
similarity index 100%
rename from components/kubeflow/katib-launcher/src/launch_crd.py
rename to components/kubeflow/common/launch_crd.py
diff --git a/components/kubeflow/deployer/component.yaml b/components/kubeflow/deployer/component.yaml
index ba88823fe44..938eefb66dd 100644
--- a/components/kubeflow/deployer/component.yaml
+++ b/components/kubeflow/deployer/component.yaml
@@ -11,7 +11,7 @@ inputs:
# - {name: Endppoint URI, type: Serving URI, description: 'URI of the deployed prediction service..'}
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:bae654dc5cf407359ac5f822d03833768739c4c1
command: [/bin/deploy.sh]
args: [
--model-export-path, {inputValue: Model dir},
diff --git a/components/kubeflow/dnntrainer/component.yaml b/components/kubeflow/dnntrainer/component.yaml
index 5db3221684c..7254bf6a062 100644
--- a/components/kubeflow/dnntrainer/component.yaml
+++ b/components/kubeflow/dnntrainer/component.yaml
@@ -16,7 +16,7 @@ outputs:
- {name: MLPipeline UI metadata, type: UI metadata}
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:bae654dc5cf407359ac5f822d03833768739c4c1
command: [python2, -m, trainer.task]
args: [
--transformed-data-dir, {inputValue: Transformed data dir},
diff --git a/components/kubeflow/dnntrainer/src/setup.py b/components/kubeflow/dnntrainer/src/setup.py
index fe6001945f0..b08a4134d00 100644
--- a/components/kubeflow/dnntrainer/src/setup.py
+++ b/components/kubeflow/dnntrainer/src/setup.py
@@ -28,7 +28,7 @@
long_description="""
""",
install_requires=[
- 'tensorflow==1.12.1',
+ 'tensorflow==1.15.0',
],
package_data={
},
diff --git a/components/kubeflow/katib-launcher/build_image.sh b/components/kubeflow/katib-launcher/build_image.sh
index 75f326fa837..181f73b9a14 100755
--- a/components/kubeflow/katib-launcher/build_image.sh
+++ b/components/kubeflow/katib-launcher/build_image.sh
@@ -34,6 +34,7 @@ done
mkdir -p ./build
rsync -arvp ./src/ ./build/
+rsync -arvp ../common/ ./build/
cp ../../license.sh ./build
cp ../../third_party_licenses.csv ./build
@@ -41,18 +42,18 @@ cp ../../third_party_licenses.csv ./build
LOCAL_LAUNCHER_IMAGE_NAME=ml-pipeline-kubeflow-experiment
docker build -t ${LOCAL_LAUNCHER_IMAGE_NAME} .
+if [ -z "${TAG_NAME}" ]; then
+ TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
+fi
if [ -z "${LAUNCHER_IMAGE_NAME}" ]; then
- if [ -z "${TAG_NAME}" ]; then
- TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
- fi
if [ -z "${PROJECT_ID}" ]; then
PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
fi
docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
docker push gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
else
- docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} ${LAUNCHER_IMAGE_NAME}
- docker push ${LAUNCHER_IMAGE_NAME}
+ docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
+ docker push ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
fi
rm -rf ./build
diff --git a/components/kubeflow/launcher/Dockerfile b/components/kubeflow/launcher/Dockerfile
index ed7f892d7fa..fd2309e119e 100644
--- a/components/kubeflow/launcher/Dockerfile
+++ b/components/kubeflow/launcher/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright 2018 The Kubeflow Authors
+# Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,52 +11,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
FROM ubuntu:16.04
-ARG TRAINER_IMAGE_NAME
-
-RUN apt-get update -y
-
-RUN apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools wget unzip git
-
-RUN easy_install pip
-
-RUN pip install pyyaml==3.12 six==1.11.0 requests==2.18.4 tensorflow==1.7.0 \
- kubernetes google-api-python-client retrying
-
-RUN wget -nv https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip && \
- unzip -qq google-cloud-sdk.zip -d tools && \
- rm google-cloud-sdk.zip && \
- tools/google-cloud-sdk/install.sh --usage-reporting=false \
- --path-update=false --bash-completion=false \
- --disable-installation-options && \
- tools/google-cloud-sdk/bin/gcloud -q components update \
- gcloud core gsutil && \
- tools/google-cloud-sdk/bin/gcloud -q components install kubectl && \
- tools/google-cloud-sdk/bin/gcloud config set component_manager/disable_update_check true && \
- touch /tools/google-cloud-sdk/lib/third_party/google.py
-
-RUN wget -nv https://github.com/ksonnet/ksonnet/releases/download/v0.9.0/ks_0.9.0_linux_amd64.tar.gz && \
- tar -xzf ks_0.9.0_linux_amd64.tar.gz && \
- mkdir -p /tools/ks/bin && \
- cp ./ks_0.9.0_linux_amd64/ks /tools/ks/bin && \
- rm ks_0.9.0_linux_amd64.tar.gz && \
- rm -r ks_0.9.0_linux_amd64
-
-RUN wget https://github.com/kubeflow/tf-operator/archive/v0.3.0.zip && \
- unzip v0.3.0.zip && \
- mv tf-operator-0.3.0 tf-operator
-
-ENV PYTHONPATH $PYTHONPATH:/tf-operator
-
-ENV PATH $PATH:/tools/google-cloud-sdk/bin:/tools/ks/bin
-
-ENV TRAINER_IMAGE_NAME $TRAINER_IMAGE_NAME
+RUN apt-get update -y && \
+ apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools wget && \
+ easy_install pip && \
+ pip install pyyaml==3.12 kubernetes
ADD build /ml
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
-ENTRYPOINT ["python", "/ml/launch_tf_job.py"]
\ No newline at end of file
+ENTRYPOINT ["python", "/ml/launch_tfjob.py"]
diff --git a/components/kubeflow/launcher/OWNERS b/components/kubeflow/launcher/OWNERS
new file mode 100644
index 00000000000..808ae38f7a9
--- /dev/null
+++ b/components/kubeflow/launcher/OWNERS
@@ -0,0 +1,4 @@
+approvers:
+ - hougangliu
+reviewers:
+ - hougangliu
diff --git a/components/kubeflow/launcher/build_image.sh b/components/kubeflow/launcher/build_image.sh
index a1e22dd43c1..dc556368667 100755
--- a/components/kubeflow/launcher/build_image.sh
+++ b/components/kubeflow/launcher/build_image.sh
@@ -1,5 +1,5 @@
#!/bin/bash -e
-# Copyright 2018 Google LLC
+# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,13 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
while getopts ":hp:t:i:" opt; do
case "${opt}" in
h) echo "-p: project name"
- echo "-t: tag name"
- echo "-i: image name. If provided, project name and tag name are not necessary"
- exit
+ echo "-t: tag name"
+ echo "-i: image name. If provided, project name and tag name are not necessary"
+ exit
;;
p) PROJECT_ID=${OPTARG}
;;
@@ -33,56 +32,28 @@ while getopts ":hp:t:i:" opt; do
esac
done
-LOCAL_LAUNCHER_IMAGE_NAME=ml-pipeline-kubeflow-tf
-LOCAL_TRAINER_IMAGE_NAME=ml-pipeline-kubeflow-tf-trainer
-
-if [ -z "${PROJECT_ID}" ]; then
- PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
-fi
-
-if [ -z "${TAG_NAME}" ]; then
- TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
-fi
-
mkdir -p ./build
rsync -arvp ./src/ ./build/
+rsync -arvp ../common/ ./build/
cp ../../license.sh ./build
cp ../../third_party_licenses.csv ./build
-# Build the trainer image
-if [ -z "${LAUNCHER_IMAGE_NAME}" ]; then
- TRAINER_IMAGE_NAME=gcr.io/${PROJECT_ID}/${LOCAL_TRAINER_IMAGE_NAME}:${TAG_NAME}
-else
- # construct the trainer image name as "laucher_image_name"-trainer:"launcher_image_tag"
- colon_index=`expr index "${LAUNCHER_IMAGE_NAME}" :`
- if [ $colon_index == '0' ]; then
- TRAINER_IMAGE_NAME=${LAUNCHER_IMAGE_NAME}-trainer
- else
- tag=${LAUNCHER_IMAGE_NAME:$colon_index}
- TRAINER_IMAGE_NAME=${LAUNCHER_IMAGE_NAME:0:$colon_index-1}-trainer:${tag}
- fi
-fi
+LOCAL_LAUNCHER_IMAGE_NAME=ml-pipeline-kubeflow-tfjob
-bash_dir=`dirname $0`
-bash_dir_abs=`realpath $bash_dir`
-parent_dir=`dirname ${bash_dir_abs}`
-trainer_dir=${parent_dir}/dnntrainer
-cd ${trainer_dir}
-if [ -z "${LAUNCHER_IMAGE_NAME}" ]; then
- ./build_image.sh -p ${PROJECT_ID} -t ${TAG_NAME}
-else
- ./build_image.sh -i ${TRAINER_IMAGE_NAME}
+docker build -t ${LOCAL_LAUNCHER_IMAGE_NAME} .
+if [ -z "${TAG_NAME}" ]; then
+ TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
fi
-cd -
-
-docker build -t ${LOCAL_LAUNCHER_IMAGE_NAME} . --build-arg TRAINER_IMAGE_NAME=${TRAINER_IMAGE_NAME}
if [ -z "${LAUNCHER_IMAGE_NAME}" ]; then
+ if [ -z "${PROJECT_ID}" ]; then
+ PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
+ fi
docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
docker push gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
else
- docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} "${LAUNCHER_IMAGE_NAME}"
- docker push "${LAUNCHER_IMAGE_NAME}"
+ docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
+ docker push ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
fi
rm -rf ./build
diff --git a/components/kubeflow/launcher/component.yaml b/components/kubeflow/launcher/component.yaml
new file mode 100644
index 00000000000..3bddf5ec497
--- /dev/null
+++ b/components/kubeflow/launcher/component.yaml
@@ -0,0 +1,35 @@
+name: Kubeflow - Launch TFJob
+description: Kubeflow TFJob launcher
+inputs:
+- {name: Name, type: String, description: 'TFJob name.'}
+- {name: Namespace, type: String, default: kubeflow, description: 'TFJob namespace.'}
+- {name: Version, type: String, default: v1, description: 'TFJob version.'}
+- {name: ActiveDeadlineSeconds, type: Integer, default: -1, description: 'Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always.'}
+- {name: BackoffLimit, type: Integer, default: -1, description: 'Number of retries before marking this job as failed.'}
+- {name: ttl Seconds After Finished, type: Integer, default: -1, description: 'Defines the TTL for cleaning up finished TFJobs.'}
+- {name: CleanPodPolicy, type: String, default: Running, description: 'Defines the policy for cleaning up pods after the TFJob completes.'}
+- {name: PS Spec, type: JSON, default: '{}', description: 'TFJob ps replicaSpecs.'}
+- {name: Worker Spec, type: JSON, default: '{}', description: 'TFJob worker replicaSpecs.'}
+- {name: Chief Spec, type: JSON, default: '{}', description: 'TFJob chief replicaSpecs.'}
+- {name: Evaluator Spec, type: JSON, default: '{}', description: 'TFJob evaluator replicaSpecs.'}
+- {name: Tfjob Timeout Minutes, type: Integer, default: 1440, description: 'Time in minutes to wait for the TFJob to complete.'}
+- {name: Delete Finished Tfjob, type: Bool, default: 'True', description: 'Whether to delete the tfjob after it is finished.'}
+implementation:
+ container:
+ image: liuhougangxa/kubeflow-tfjob-launcher:latest
+ command: [python, /ml/launch_tfjob.py]
+ args: [
+ --name, {inputValue: Name},
+ --namespace, {inputValue: Namespace},
+ --version, {inputValue: Version},
+ --activeDeadlineSeconds, {inputValue: ActiveDeadlineSeconds},
+ --backoffLimit, {inputValue: BackoffLimit},
+ --cleanPodPolicy, {inputValue: CleanPodPolicy},
+ --ttlSecondsAfterFinished, {inputValue: ttl Seconds After Finished},
+ --psSpec, {inputValue: PS Spec},
+ --workerSpec, {inputValue: Worker Spec},
+ --chiefSpec, {inputValue: Chief Spec},
+ --evaluatorSpec, {inputValue: Evaluator Spec},
+ --tfjobTimeoutMinutes, {inputValue: Tfjob Timeout Minutes},
+ --deleteAfterDone, {inputValue: Delete Finished Tfjob},
+ ]
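For reference, the `PS Spec`, `Worker Spec`, `Chief Spec`, and `Evaluator Spec` inputs above each take a TFJob replica spec as JSON. A minimal sketch of such a value, mirroring the structure used by sample.py later in this change (the image and command are placeholders, not part of this change):

```python
# Minimal TFJob worker replica spec; pass it (or its JSON form) as the Worker Spec input.
worker_spec = {
    "replicas": 2,
    "restartPolicy": "OnFailure",
    "template": {
        "spec": {
            "containers": [{
                "name": "tensorflow",
                "image": "gcr.io/my-project/my-trainer:latest",  # placeholder image
                "command": ["python", "-m", "trainer.task"],      # placeholder command
            }]
        }
    },
}
```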
diff --git a/components/kubeflow/launcher/kubeflow_tfjob_launcher_op.py b/components/kubeflow/launcher/kubeflow_tfjob_launcher_op.py
deleted file mode 100644
index ee4b74de42d..00000000000
--- a/components/kubeflow/launcher/kubeflow_tfjob_launcher_op.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from kfp import dsl
-
-def kubeflow_tfjob_launcher_op(container_image, command, number_of_workers: int, number_of_parameter_servers: int, tfjob_timeout_minutes: int, output_dir=None, step_name='TFJob-launcher'):
- return dsl.ContainerOp(
- name = step_name,
- image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26',
- arguments = [
- '--workers', number_of_workers,
- '--pss', number_of_parameter_servers,
- '--tfjob-timeout-minutes', tfjob_timeout_minutes,
- '--container-image', container_image,
- '--output-dir', output_dir,
- '--ui-metadata-type', 'tensorboard',
- '--',
- ] + command,
- file_outputs = {'train': '/output.txt'},
- output_artifact_paths={
- 'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json',
- },
- )
diff --git a/components/kubeflow/launcher/sample.py b/components/kubeflow/launcher/sample.py
new file mode 100644
index 00000000000..a4368190e9d
--- /dev/null
+++ b/components/kubeflow/launcher/sample.py
@@ -0,0 +1,75 @@
+import json
+from kfp import components
+import kfp.dsl as dsl
+
+@dsl.pipeline(
+ name="Launch kubeflow tfjob",
+ description="An example to launch tfjob."
+)
+def mnist_train(
+ name="mnist",
+ namespace="kubeflow",
+ workerNum=3,
+ ttlSecondsAfterFinished=-1,
+ tfjobTimeoutMinutes=60,
+ deleteAfterDone=False):
+ tfjob_launcher_op = components.load_component_from_file("./component.yaml")
+ # tfjob_launcher_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/launcher/component.yaml')
+
+ chief = {
+ "replicas": 1,
+ "restartPolicy": "OnFailure",
+ "template": {
+ "spec": {
+ "containers": [
+ {
+ "command": [
+ "python",
+ "/opt/model.py"
+ ],
+ "args": [
+ "--tf-train-steps=6000"
+ ],
+ "image": "liuhougangxa/tf-estimator-mnist",
+ "name": "tensorflow",
+ }
+ ]
+ }
+ }
+ }
+ worker = {}
+ if workerNum > 0:
+ worker = {
+ "replicas": workerNum,
+ "restartPolicy": "OnFailure",
+ "template": {
+ "spec": {
+ "containers": [
+ {
+ "command": [
+ "python",
+ "/opt/model.py"
+ ],
+ "args": [
+ "--tf-train-steps=6000"
+ ],
+ "image": "liuhougangxa/tf-estimator-mnist",
+ "name": "tensorflow",
+ }
+ ]
+ }
+ }
+ }
+ tfjob_launcher_op(
+ name=name,
+ namespace=namespace,
+ ttl_seconds_after_finished=ttlSecondsAfterFinished,
+ worker_spec=worker,
+ chief_spec=chief,
+ tfjob_timeout_minutes=tfjobTimeoutMinutes,
+ delete_finished_tfjob=deleteAfterDone
+ )
+
+if __name__ == "__main__":
+ import kfp.compiler as compiler
+ compiler.Compiler().compile(mnist_train, __file__ + ".tar.gz")
diff --git a/components/kubeflow/launcher/src/__init__.py b/components/kubeflow/launcher/src/__init__.py
index 9251ef2616a..ddd71c00285 100644
--- a/components/kubeflow/launcher/src/__init__.py
+++ b/components/kubeflow/launcher/src/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2018 Google LLC
+# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,5 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from .kubeflow_tfjob_launcher_op import kubeflow_tfjob_launcher_op
diff --git a/components/kubeflow/launcher/src/launch_tf_job.py b/components/kubeflow/launcher/src/launch_tf_job.py
deleted file mode 100644
index 414c15fd7c5..00000000000
--- a/components/kubeflow/launcher/src/launch_tf_job.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Usage:
-python launch_tf_job.py
- --workers=3
- --pss=1
- --container-image=gcr.io/${PROJECT_ID}/ml-pipeline-kubeflow-tf-trainer:${TAG_NAME}
- --output-dir gs://ml-pipeline-playground/flower/trainer
- --ui-metadata-type tensorboard
- --
- python -m trainer.task
- --job-dir=gs://ml-pipeline-playground/flower/trainer
- --transformed-data-dir=gs://ml-pipeline-playground/flower/transformed
- --schema=gs://ml-pipeline-playground/flower/schema.json
- --target=label
- --hidden-layer-size=100,50
- --steps=2000
-"""
-# TODO: Add unit/integration tests
-
-import argparse
-import datetime
-import json
-import os
-import logging
-import requests
-import subprocess
-import six
-import time
-import yaml
-from py import tf_job_client
-from kubernetes import client as k8s_client
-from kubernetes import config
-
-
-def _generate_train_yaml(src_filename, tfjob_ns, workers, pss, trainer_image, command):
- """_generate_train_yaml generates train yaml files based on train.template.yaml"""
- with open(src_filename, 'r') as f:
- content = yaml.safe_load(f)
-
- content['metadata']['generateName'] = 'trainer-'
- content['metadata']['namespace'] = tfjob_ns
-
- if workers and pss:
- content['spec']['tfReplicaSpecs']['PS']['replicas'] = pss
- content['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['image'] = trainer_image
- content['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['command'] = command
- content['spec']['tfReplicaSpecs']['Worker']['replicas'] = workers
- content['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['image'] = trainer_image
- content['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['command'] = command
- content['spec']['tfReplicaSpecs']['MASTER']['template']['spec']['containers'][0]['image'] = trainer_image
- content['spec']['tfReplicaSpecs']['MASTER']['template']['spec']['containers'][0]['command'] = command
- else:
- # If no workers and pss set, default is 1.
- master_spec = content['spec']['tfReplicaSpecs']['MASTER']
- worker_spec = content['spec']['tfReplicaSpecs']['Worker']
- ps_spec = content['spec']['tfReplicaSpecs']['PS']
- master_spec['template']['spec']['containers'][0]['image'] = trainer_image
- master_spec['template']['spec']['containers'][0]['command'] = command
- worker_spec['template']['spec']['containers'][0]['image'] = trainer_image
- worker_spec['template']['spec']['containers'][0]['command'] = command
- ps_spec['template']['spec']['containers'][0]['image'] = trainer_image
- ps_spec['template']['spec']['containers'][0]['command'] = command
-
- return content
-
-def main(argv=None):
- parser = argparse.ArgumentParser(description='Kubeflow TFJob launcher')
- parser.add_argument('--container-image', type=str,
- help='''Container image to run using KubeFlow TFJob. The command line should be added after --.''')
- parser.add_argument('--workers', type=int, default=0)
- parser.add_argument('--pss', type=int, default=0)
- parser.add_argument('--cluster', type=str,
- help='GKE cluster set up for kubeflow. If set, zone must be provided. ' +
- 'If not set, assuming this runs in a GKE container and current ' +
- 'cluster is used.')
- parser.add_argument('--zone', type=str, help='zone of the kubeflow cluster.')
- parser.add_argument('--kfversion', type=str,
- default='v1alpha2',
- help='The version of the deployed kubeflow. ' +
- 'If not set, the default version is v1alpha2')
- parser.add_argument('--tfjob-ns', type=str,
- default='default',
- help='The namespace where the tfjob is submitted' +
- 'If not set, the default namespace is default')
- parser.add_argument('--tfjob-timeout-minutes', type=int,
- default=10,
- help='Time in minutes to wait for the TFJob to complete')
- parser.add_argument('--output-dir', type=str)
- parser.add_argument('--ui-metadata-type', type=str, default='tensorboard')
- import sys
- all_args = sys.argv[1:]
- separator_idx = all_args.index('--')
- launcher_args = all_args[:separator_idx]
- remaining_args = all_args[separator_idx + 1:]
-
- args = parser.parse_args(launcher_args)
-
- logging.getLogger().setLevel(logging.INFO)
- args_dict = vars(args)
- if args.cluster and args.zone:
- cluster = args_dict.pop('cluster')
- zone = args_dict.pop('zone')
- else:
- # Get culster name and zone from metadata
- metadata_server = "http://metadata/computeMetadata/v1/instance/"
- metadata_flavor = {'Metadata-Flavor' : 'Google'}
- cluster = requests.get(metadata_server + "attributes/cluster-name",
- headers = metadata_flavor).text
- zone = requests.get(metadata_server + "zone",
- headers = metadata_flavor).text.split('/')[-1]
-
- logging.info('Getting credentials for GKE cluster %s.' % cluster)
- subprocess.call(['gcloud', 'container', 'clusters', 'get-credentials', cluster,
- '--zone', zone])
-
- workers = args_dict.pop('workers')
- pss = args_dict.pop('pss')
- kf_version = args_dict.pop('kfversion')
- tfjob_ns = args_dict.pop('tfjob_ns')
- tfjob_timeout_minutes = args_dict.pop('tfjob_timeout_minutes')
- trainer_image = args.container_image or os.environ['TRAINER_IMAGE_NAME']
- command=remaining_args
- logging.info('Generating training template.')
- template_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.template.yaml')
- content_yaml = _generate_train_yaml(template_file, tfjob_ns, workers, pss, trainer_image, command)
-
- logging.info('Start training.')
- # Set up handler for k8s clients
- config.load_incluster_config()
- api_client = k8s_client.ApiClient()
- create_response = tf_job_client.create_tf_job(api_client, content_yaml, version=kf_version)
- job_name = create_response['metadata']['name']
-
- if args.output_dir:
- # Create metadata.json file for visualization.
- metadata = {
- 'outputs' : [{
- 'type': args.ui_metadata_type,
- 'source': args.output_dir,
- }]
- }
- with open('/mlpipeline-ui-metadata.json', 'w') as f:
- json.dump(metadata, f)
-
- wait_response = tf_job_client.wait_for_job(
- api_client, tfjob_ns, job_name, kf_version,
- timeout=datetime.timedelta(minutes=tfjob_timeout_minutes))
- succ = True
- #TODO: update this failure checking after tf-operator has the condition checking function.
- if 'Worker' in wait_response['status']['tfReplicaStatuses']:
- if 'Failed' in wait_response['status']['tfReplicaStatuses']['Worker']:
- logging.error('Training failed since workers failed.')
- succ = False
- if 'PS' in wait_response['status']['tfReplicaStatuses']:
- if 'Failed' in wait_response['status']['tfReplicaStatuses']['PS']:
- logging.error('Training failed since PSs failed.')
- succ = False
- if 'MASTER' in wait_response['status']['tfReplicaStatuses']:
- if 'Failed' in wait_response['status']['tfReplicaStatuses']['MASTER']:
- logging.error('Training failed since MASTER failed.')
- succ = False
-
- #TODO: remove this after kubeflow fixes the wait_for_job issue
- # because the wait_for_job returns when the worker finishes but the master might not be complete yet.
- if 'MASTER' in wait_response['status']['tfReplicaStatuses'] and 'active' in wait_response['status']['tfReplicaStatuses']['MASTER']:
- master_active = True
- while master_active:
- # Wait for master to finish
- time.sleep(2)
- wait_response = tf_job_client.wait_for_job(api_client, tfjob_ns, job_name, kf_version,
- timeout=datetime.timedelta(minutes=tfjob_timeout_minutes))
- if 'active' not in wait_response['status']['tfReplicaStatuses']['MASTER']:
- master_active = False
-
- if succ:
- logging.info('Training success.')
-
- tf_job_client.delete_tf_job(api_client, tfjob_ns, job_name, version=kf_version)
- with open('/output.txt', 'w') as f:
- f.write(args.output_dir)
-
-if __name__== "__main__":
- main()
diff --git a/components/kubeflow/launcher/src/launch_tfjob.py b/components/kubeflow/launcher/src/launch_tfjob.py
new file mode 100644
index 00000000000..30666d8d143
--- /dev/null
+++ b/components/kubeflow/launcher/src/launch_tfjob.py
@@ -0,0 +1,136 @@
+# Copyright 2019 kubeflow.org.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import datetime
+from distutils.util import strtobool
+import json
+import os
+import logging
+import yaml
+import launch_crd
+
+from kubernetes import client as k8s_client
+from kubernetes import config
+
+def yamlOrJsonStr(str):
+ if str == "" or str == None:
+ return None
+ return yaml.safe_load(str)
+
+TFJobGroup = "kubeflow.org"
+TFJobPlural = "tfjobs"
+
+class TFJob(launch_crd.K8sCR):
+ def __init__(self, version="v1", client=None):
+ super(TFJob, self).__init__(TFJobGroup, TFJobPlural, version, client)
+
+ def is_expected_conditions(self, inst, expected_conditions):
+ conditions = inst.get('status', {}).get("conditions")
+ if not conditions:
+ return False, ""
+ if conditions[-1]["type"] in expected_conditions and conditions[-1]["status"] == "True":
+ return True, conditions[-1]["type"]
+ else:
+ return False, conditions[-1]["type"]
+
+def main(argv=None):
+ parser = argparse.ArgumentParser(description='Kubeflow TFJob launcher')
+ parser.add_argument('--name', type=str,
+ help='TFJob name.')
+ parser.add_argument('--namespace', type=str,
+ default='kubeflow',
+ help='TFJob namespace.')
+ parser.add_argument('--version', type=str,
+ default='v1',
+ help='TFJob version.')
+ parser.add_argument('--activeDeadlineSeconds', type=int,
+ default=-1,
+ help='Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always.')
+ parser.add_argument('--backoffLimit', type=int,
+ default=-1,
+ help='Number of retries before marking this job as failed.')
+ parser.add_argument('--cleanPodPolicy', type=str,
+ default="Running",
+ help='Defines the policy for cleaning up pods after the TFJob completes.')
+ parser.add_argument('--ttlSecondsAfterFinished', type=int,
+ default=-1,
+ help='Defines the TTL for cleaning up finished TFJobs.')
+ parser.add_argument('--psSpec', type=yamlOrJsonStr,
+ default={},
+ help='TFJob ps replicaSpecs.')
+ parser.add_argument('--workerSpec', type=yamlOrJsonStr,
+ default={},
+ help='TFJob worker replicaSpecs.')
+ parser.add_argument('--chiefSpec', type=yamlOrJsonStr,
+ default={},
+ help='TFJob chief replicaSpecs.')
+ parser.add_argument('--evaluatorSpec', type=yamlOrJsonStr,
+ default={},
+ help='TFJob evaluator replicaSpecs.')
+ parser.add_argument('--deleteAfterDone', type=strtobool,
+ default=True,
+ help='Whether to delete the TFJob automatically after it finishes.')
+ parser.add_argument('--tfjobTimeoutMinutes', type=int,
+ default=60*24,
+ help='Time in minutes to wait for the TFJob to complete.')
+
+ args = parser.parse_args()
+
+ logging.getLogger().setLevel(logging.INFO)
+
+ logging.info('Generating tfjob template.')
+
+ config.load_incluster_config()
+ api_client = k8s_client.ApiClient()
+ tfjob = TFJob(version=args.version, client=api_client)
+ inst = {
+ "apiVersion": "%s/%s" % (TFJobGroup, args.version),
+ "kind": "TFJob",
+ "metadata": {
+ "name": args.name,
+ "namespace": args.namespace,
+ },
+ "spec": {
+ "cleanPodPolicy": args.cleanPodPolicy,
+ "tfReplicaSpecs": {
+ },
+ },
+ }
+ if args.ttlSecondsAfterFinished >=0:
+ inst["spec"]["ttlSecondsAfterFinished"] = args.ttlSecondsAfterFinished
+ if args.backoffLimit >= 0:
+ inst["spec"]["backoffLimit"] = args.backoffLimit
+ if args.activeDeadlineSeconds >=0:
+ inst["spec"]["activeDeadlineSecond"] = args.activeDeadlineSeconds
+ if args.psSpec:
+ inst["spec"]["tfReplicaSpecs"]["PS"] = args.psSpec
+ if args.chiefSpec:
+ inst["spec"]["tfReplicaSpecs"]["Chief"] = args.chiefSpec
+ if args.workerSpec:
+ inst["spec"]["tfReplicaSpecs"]["Worker"] = args.workerSpec
+ if args.evaluatorSpec:
+ inst["spec"]["tfReplicaSpecs"]["Evaluator"] = args.evaluatorSpec
+
+ create_response = tfjob.create(inst)
+
+ expected_conditions = ["Succeeded", "Failed"]
+ tfjob.wait_for_condition(
+ args.namespace, args.name, expected_conditions,
+ timeout=datetime.timedelta(minutes=args.tfjobTimeoutMinutes))
+ if args.deleteAfterDone:
+ tfjob.delete(args.name, args.namespace)
+
+if __name__== "__main__":
+ main()
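The launcher above decides success or failure from the most recent entry in the TFJob's status conditions. A small self-contained sketch of that check, using a hypothetical status payload for illustration:

```python
# Mirrors TFJob.is_expected_conditions above: only the last condition is consulted.
def is_expected_conditions(inst, expected_conditions):
    conditions = inst.get('status', {}).get('conditions')
    if not conditions:
        return False, ""
    last = conditions[-1]
    if last["type"] in expected_conditions and last["status"] == "True":
        return True, last["type"]
    return False, last["type"]

# Hypothetical TFJob status as it might be returned by the Kubernetes API.
status = {"status": {"conditions": [
    {"type": "Created", "status": "True"},
    {"type": "Running", "status": "True"},
    {"type": "Succeeded", "status": "True"},
]}}
print(is_expected_conditions(status, ["Succeeded", "Failed"]))  # -> (True, 'Succeeded')
```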
diff --git a/components/kubeflow/launcher/src/train.template.yaml b/components/kubeflow/launcher/src/train.template.yaml
deleted file mode 100644
index 011c5bd74fa..00000000000
--- a/components/kubeflow/launcher/src/train.template.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: kubeflow.org/v1alpha2
-kind: TFJob
-metadata:
- generateName: tfjob
- namespace: default
-spec:
- tfReplicaSpecs:
- PS:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
- command:
- - python
- - -m
- - trainer.task
- env:
- - name: GOOGLE_APPLICATION_CREDENTIALS
- value: "/etc/secrets/user-gcp-sa.json"
- volumeMounts:
- - name: sa
- mountPath: "/etc/secrets"
- readOnly: true
- volumes:
- - name: sa
- secret:
- secretName: user-gcp-sa
- Worker:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
- command:
- - python
- - -m
- - trainer.task
- env:
- - name: GOOGLE_APPLICATION_CREDENTIALS
- value: "/etc/secrets/user-gcp-sa.json"
- volumeMounts:
- - name: sa
- mountPath: "/etc/secrets"
- readOnly: true
- volumes:
- - name: sa
- secret:
- secretName: user-gcp-sa
- MASTER:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
- command:
- - python
- - -m
- - trainer.task
- env:
- - name: GOOGLE_APPLICATION_CREDENTIALS
- value: "/etc/secrets/user-gcp-sa.json"
- volumeMounts:
- - name: sa
- mountPath: "/etc/secrets"
- readOnly: true
- volumes:
- - name: sa
- secret:
- secretName: user-gcp-sa
diff --git a/components/kubeflow/launcher/test/__init__.py b/components/kubeflow/launcher/test/__init__.py
deleted file mode 100644
index 2e94f3e551a..00000000000
--- a/components/kubeflow/launcher/test/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/components/kubeflow/launcher/test/test_launcher.py b/components/kubeflow/launcher/test/test_launcher.py
deleted file mode 100644
index 4b1f0ab5029..00000000000
--- a/components/kubeflow/launcher/test/test_launcher.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import launcher
-from launcher import train
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-import unittest
-import yaml
-
-
-class TestLauncher(unittest.TestCase):
-
- def test_yaml_generation_basic(self):
- """Test generating train yaml from templates"""
-
- test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
- train_template_file = os.path.join(test_data_dir, 'train.template.yaml')
- tfjob_ns = 'default'
- worker = 2
- pss = 1
- args_list = []
- args_list.append('--learning-rate=0.1')
- generated_yaml = train._generate_train_yaml(train_template_file, tfjob_ns, worker, pss, args_list)
- with open(os.path.join(test_data_dir, 'train_basic.yaml'), 'r') as f:
- golden = yaml.safe_load(f)
- self.assertEqual(golden, generated_yaml)
-
- def test_yaml_generation_advanced(self):
- """Test generating train yaml with zero worker and specified tfjob namespace"""
-
- test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
- train_template_file = os.path.join(test_data_dir, 'train.template.yaml')
- worker = 0
- pss = 0
- args_list = []
- tfjob_ns = 'kubeflow'
- args_list.append('--learning-rate=0.1')
- generated_yaml = train._generate_train_yaml(train_template_file, tfjob_ns, worker, pss, args_list)
- with open(os.path.join(test_data_dir, 'train_zero_worker.yaml'), 'r') as f:
- golden = yaml.safe_load(f)
- self.assertEqual(golden, generated_yaml)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/components/kubeflow/launcher/test/testdata/train.template.yaml b/components/kubeflow/launcher/test/testdata/train.template.yaml
deleted file mode 100644
index aeaa0b456e5..00000000000
--- a/components/kubeflow/launcher/test/testdata/train.template.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: kubeflow.org/v1alpha2
-kind: TFJob
-metadata:
- generateName: tfjob
- namespace: default
-spec:
- tfReplicaSpecs:
- PS:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
- Worker:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
- MASTER:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
diff --git a/components/kubeflow/launcher/test/testdata/train_basic.yaml b/components/kubeflow/launcher/test/testdata/train_basic.yaml
deleted file mode 100644
index 3e2a5aa6ff3..00000000000
--- a/components/kubeflow/launcher/test/testdata/train_basic.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: kubeflow.org/v1alpha2
-kind: TFJob
-metadata:
- generateName: trainer-
- namespace: default
-spec:
- tfReplicaSpecs:
- PS:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
- - --learning-rate=0.1
- Worker:
- replicas: 2
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
- - --learning-rate=0.1
- MASTER:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
- - --learning-rate=0.1
diff --git a/components/kubeflow/launcher/test/testdata/train_zero_worker.yaml b/components/kubeflow/launcher/test/testdata/train_zero_worker.yaml
deleted file mode 100644
index 593a0a53ea1..00000000000
--- a/components/kubeflow/launcher/test/testdata/train_zero_worker.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-apiVersion: kubeflow.org/v1alpha2
-kind: TFJob
-metadata:
- generateName: trainer-
- namespace: kubeflow
-spec:
- tfReplicaSpecs:
- PS:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
- - --learning-rate=0.1
- Worker:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
- - --learning-rate=0.1
- MASTER:
- replicas: 1
- restartPolicy: OnFailure
- template:
- spec:
- containers:
- - name: tensorflow
- image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer
- command:
- - python
- - -m
- - trainer.task
- - --learning-rate=0.1
diff --git a/components/local/confusion_matrix/component.yaml b/components/local/confusion_matrix/component.yaml
index 7bcf67435ce..3d15389db30 100644
--- a/components/local/confusion_matrix/component.yaml
+++ b/components/local/confusion_matrix/component.yaml
@@ -9,7 +9,7 @@ outputs:
- {name: MLPipeline Metrics, type: Metrics}
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:bae654dc5cf407359ac5f822d03833768739c4c1
command: [python2, /ml/confusion_matrix.py]
args: [
--predictions, {inputValue: Predictions},
diff --git a/components/local/roc/component.yaml b/components/local/roc/component.yaml
index f259e7e0316..b514cf25d69 100644
--- a/components/local/roc/component.yaml
+++ b/components/local/roc/component.yaml
@@ -11,7 +11,7 @@ outputs:
- {name: MLPipeline Metrics, type: Metrics}
implementation:
container:
- image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:9ad7d7dd9776ce75a83712f5723db2ef93ba5c26
+ image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:bae654dc5cf407359ac5f822d03833768739c4c1
command: [python2, /ml/roc.py]
args: [
--predictions, {inputValue: Predictions dir},
diff --git a/components/release.sh b/components/release.sh
index 88d768689fb..58e8b9f5fdf 100755
--- a/components/release.sh
+++ b/components/release.sh
@@ -27,7 +27,7 @@ images=(
"ml-pipeline-kubeflow-deployer"
"ml-pipeline-kubeflow-tf-trainer"
"ml-pipeline-kubeflow-tf-trainer-gpu"
- "ml-pipeline-kubeflow-tf"
+ "ml-pipeline-kubeflow-tfjob"
"ml-pipeline-dataproc-analyze"
"ml-pipeline-dataproc-create-cluster"
"ml-pipeline-dataproc-delete-cluster"
diff --git a/components/tfx/Evaluator/component.py b/components/tfx/Evaluator/component.py
new file mode 100644
index 00000000000..a21deb8f5d6
--- /dev/null
+++ b/components/tfx/Evaluator/component.py
@@ -0,0 +1,130 @@
+# flake8: noqa TODO
+
+from kfp.components import InputPath, OutputPath
+
+
+def Evaluator(
+ examples_path: InputPath('Examples'),
+ model_exports_path: InputPath('Model'),
+ #model_path: InputPath('Model'),
+
+ output_path: OutputPath('ModelEval'),
+
+ feature_slicing_spec: 'JsonObject: evaluator_pb2.FeatureSlicingSpec' = None,
+):
+ """
+ A TFX component to evaluate models trained by a TFX Trainer component.
+
+ The Evaluator component performs model evaluations in the TFX pipeline and
+ the resultant metrics can be viewed in a Jupyter notebook. It uses the
+ input examples generated from the
+ [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
+ component to evaluate the models.
+
+ Specifically, it can provide:
+ - metrics computed on entire training and eval dataset
+ - tracking metrics over time
+ - model quality performance on different feature slices
+
+ ## Exporting the EvalSavedModel in Trainer
+
+    In order to set up Evaluator in a TFX pipeline, an EvalSavedModel needs to be
+ exported during training, which is a special SavedModel containing
+ annotations for the metrics, features, labels, and so on in your model.
+ Evaluator uses this EvalSavedModel to compute metrics.
+
+ As part of this, the Trainer component creates eval_input_receiver_fn,
+ analogous to the serving_input_receiver_fn, which will extract the features
+ and labels from the input data. As with serving_input_receiver_fn, there are
+ utility functions to help with this.
+
+ Please see https://www.tensorflow.org/tfx/model_analysis for more details.
+
+ Args:
+ examples: A Channel of 'ExamplesPath' type, usually produced by ExampleGen
+ component. @Ark-kun: Must have the eval split. _required_
+ model_exports: A Channel of 'ModelExportPath' type, usually produced by
+ Trainer component. Will be deprecated in the future for the `model`
+ parameter.
+ #model: Future replacement of the `model_exports` argument.
+ feature_slicing_spec:
+ [evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
+ instance that describes how Evaluator should slice the data.
+ Returns:
+ output: Channel of `ModelEvalPath` to store the evaluation results.
+
+ Either `model_exports` or `model` must be present in the input arguments.
+
+ """
+ from tfx.components.evaluator.component import Evaluator
+ component_class = Evaluator
+ input_channels_with_splits = {'examples'}
+ output_channels_with_splits = {}
+
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ Evaluator,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
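
A minimal usage sketch for the Evaluator component defined above, assuming the generated component.yaml is loaded from its repository path and that the examples and model artifacts come from upstream ExampleGen and Trainer tasks in the same KFP pipeline; the slicing column name is purely illustrative, not something this component defines.

    # Hypothetical wiring of the Evaluator component into a KFP pipeline.
    import json
    from kfp import components

    # Load the component spec produced by func_to_container_op above (path is an assumption).
    evaluator_op = components.load_component_from_file(
        'components/tfx/Evaluator/component.yaml')

    # JSON-serialized evaluator_pb2.FeatureSlicingSpec: slice metrics by one feature.
    # 'trip_start_hour' is an illustrative column name.
    feature_slicing_spec = json.dumps(
        {'specs': [{'column_for_slicing': ['trip_start_hour']}]})

    def attach_evaluator(examples, model_exports):
        # `examples` and `model_exports` are outputs of upstream ExampleGen and
        # Trainer tasks inside a kfp.dsl pipeline function.
        return evaluator_op(
            examples=examples,
            model_exports=model_exports,
            feature_slicing_spec=feature_slicing_spec,
        )
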
diff --git a/components/tfx/Evaluator/component.yaml b/components/tfx/Evaluator/component.yaml
new file mode 100644
index 00000000000..7fa0ad842fe
--- /dev/null
+++ b/components/tfx/Evaluator/component.yaml
@@ -0,0 +1,232 @@
+name: Evaluator
+description: |
+ A TFX component to evaluate models trained by a TFX Trainer component.
+
+ The Evaluator component performs model evaluations in the TFX pipeline and
+ the resultant metrics can be viewed in a Jupyter notebook. It uses the
+ input examples generated from the
+ [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
+ component to evaluate the models.
+
+ Specifically, it can provide:
+ - metrics computed on entire training and eval dataset
+ - tracking metrics over time
+ - model quality performance on different feature slices
+
+ ## Exporting the EvalSavedModel in Trainer
+
+  In order to set up Evaluator in a TFX pipeline, an EvalSavedModel needs to be
+ exported during training, which is a special SavedModel containing
+ annotations for the metrics, features, labels, and so on in your model.
+ Evaluator uses this EvalSavedModel to compute metrics.
+
+ As part of this, the Trainer component creates eval_input_receiver_fn,
+ analogous to the serving_input_receiver_fn, which will extract the features
+ and labels from the input data. As with serving_input_receiver_fn, there are
+ utility functions to help with this.
+
+ Please see https://www.tensorflow.org/tfx/model_analysis for more details.
+
+ Args:
+ examples: A Channel of 'ExamplesPath' type, usually produced by ExampleGen
+ component. @Ark-kun: Must have the eval split. _required_
+ model_exports: A Channel of 'ModelExportPath' type, usually produced by
+ Trainer component. Will be deprecated in the future for the `model`
+ parameter.
+ #model: Future replacement of the `model_exports` argument.
+ feature_slicing_spec:
+ [evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
+ instance that describes how Evaluator should slice the data.
+ Returns:
+ output: Channel of `ModelEvalPath` to store the evaluation results.
+
+ Either `model_exports` or `model` must be present in the input arguments.
+inputs:
+- name: examples
+ type: Examples
+- name: model_exports
+ type: Model
+- name: feature_slicing_spec
+ type: 'JsonObject: evaluator_pb2.FeatureSlicingSpec'
+ optional: true
+outputs:
+- name: output
+ type: ModelEval
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ class InputPath:
+ '''When creating component from function, InputPath should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ def Evaluator(
+ examples_path: InputPath('Examples'),
+ model_exports_path: InputPath('Model'),
+ #model_path: InputPath('Model'),
+
+ output_path: OutputPath('ModelEval'),
+
+ feature_slicing_spec: 'JsonObject: evaluator_pb2.FeatureSlicingSpec' = None,
+ ):
+ """
+ A TFX component to evaluate models trained by a TFX Trainer component.
+
+ The Evaluator component performs model evaluations in the TFX pipeline and
+ the resultant metrics can be viewed in a Jupyter notebook. It uses the
+ input examples generated from the
+ [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)
+ component to evaluate the models.
+
+ Specifically, it can provide:
+ - metrics computed on entire training and eval dataset
+ - tracking metrics over time
+ - model quality performance on different feature slices
+
+ ## Exporting the EvalSavedModel in Trainer
+
+          In order to set up Evaluator in a TFX pipeline, an EvalSavedModel needs to be
+ exported during training, which is a special SavedModel containing
+ annotations for the metrics, features, labels, and so on in your model.
+ Evaluator uses this EvalSavedModel to compute metrics.
+
+ As part of this, the Trainer component creates eval_input_receiver_fn,
+ analogous to the serving_input_receiver_fn, which will extract the features
+ and labels from the input data. As with serving_input_receiver_fn, there are
+ utility functions to help with this.
+
+ Please see https://www.tensorflow.org/tfx/model_analysis for more details.
+
+ Args:
+ examples: A Channel of 'ExamplesPath' type, usually produced by ExampleGen
+ component. @Ark-kun: Must have the eval split. _required_
+ model_exports: A Channel of 'ModelExportPath' type, usually produced by
+ Trainer component. Will be deprecated in the future for the `model`
+ parameter.
+ #model: Future replacement of the `model_exports` argument.
+ feature_slicing_spec:
+ [evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
+ instance that describes how Evaluator should slice the data.
+ Returns:
+ output: Channel of `ModelEvalPath` to store the evaluation results.
+
+ Either `model_exports` or `model` must be present in the input arguments.
+
+ """
+ from tfx.components.evaluator.component import Evaluator
+ component_class = Evaluator
+ input_channels_with_splits = {'examples'}
+ output_channels_with_splits = {}
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Evaluator', description="A TFX component to evaluate models trained by a TFX Trainer component.\n\n The Evaluator component performs model evaluations in the TFX pipeline and\n the resultant metrics can be viewed in a Jupyter notebook. It uses the\n input examples generated from the\n [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen)\n component to evaluate the models.\n\n Specifically, it can provide:\n - metrics computed on entire training and eval dataset\n - tracking metrics over time\n - model quality performance on different feature slices\n\n ## Exporting the EvalSavedModel in Trainer\n\n In order to setup Evaluator in a TFX pipeline, an EvalSavedModel needs to be\n exported during training, which is a special SavedModel containing\n annotations for the metrics, features, labels, and so on in your model.\n Evaluator uses this EvalSavedModel to compute metrics.\n\n As part of this, the Trainer component creates eval_input_receiver_fn,\n analogous to the serving_input_receiver_fn, which will extract the features\n and labels from the input data. As with serving_input_receiver_fn, there are\n utility functions to help with this.\n\n Please see https://www.tensorflow.org/tfx/model_analysis for more details.\n\n Args:\n examples: A Channel of 'ExamplesPath' type, usually produced by ExampleGen\n component. @Ark-kun: Must have the eval split. _required_\n model_exports: A Channel of 'ModelExportPath' type, usually produced by\n Trainer component. Will be deprecated in the future for the `model`\n parameter.\n #model: Future replacement of the `model_exports` argument.\n feature_slicing_spec:\n [evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)\n instance that describes how Evaluator should slice the data.\n Returns:\n output: Channel of `ModelEvalPath` to store the evaluation results.\n\n Either `model_exports` or `model` must be present in the input arguments.\n")
+ _parser.add_argument("--examples", dest="examples_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--model-exports", dest="model_exports_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--feature-slicing-spec", dest="feature_slicing_spec", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--output", dest="output_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = Evaluator(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --examples
+ - inputPath: examples
+ - --model-exports
+ - inputPath: model_exports
+ - if:
+ cond:
+ isPresent: feature_slicing_spec
+ then:
+ - --feature-slicing-spec
+ - inputValue: feature_slicing_spec
+ - --output
+ - outputPath: output
diff --git a/components/tfx/ExampleGen/BigQueryExampleGen/component.py b/components/tfx/ExampleGen/BigQueryExampleGen/component.py
new file mode 100644
index 00000000000..a7473e0bf81
--- /dev/null
+++ b/components/tfx/ExampleGen/BigQueryExampleGen/component.py
@@ -0,0 +1,107 @@
+# flake8: noqa TODO
+
+from kfp.components import InputPath, OutputPath
+
+
+def BigQueryExampleGen(
+ example_artifacts_path: OutputPath('Examples'),
+
+ query: str = None,
+ input_config: 'JsonObject: example_gen_pb2.Input' = None,
+ output_config: 'JsonObject: example_gen_pb2.Output' = None,
+):
+ """
+ Official TFX BigQueryExampleGen component.
+
+ The BigQuery examplegen component takes a query, and generates train
+    and eval examples for downstream components.
+
+
+ Args:
+ query: BigQuery sql string, query result will be treated as a single
+ split, can be overwritten by input_config.
+ input_config: An example_gen_pb2.Input instance with Split.pattern as
+ BigQuery sql string. If set, it overwrites the 'query' arg, and allows
+ different queries per split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ Returns:
+ example_artifacts: Optional channel of 'ExamplesPath' for output train and
+ eval examples.
+
+ Raises:
+ RuntimeError: Only one of query and input_config should be set.
+ """
+    from tfx.components.example_gen.big_query_example_gen.component import BigQueryExampleGen
+ component_class = BigQueryExampleGen
+ input_channels_with_splits = {}
+ output_channels_with_splits = {'example_artifacts'}
+
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ BigQueryExampleGen,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
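
Two illustrative values for the JSON-serialized protos accepted above; the project, dataset, table, and hashing key are placeholders. Per the docstring, input_config overrides the plain query argument, and normally only one of the two splitting mechanisms (input_config splits versus output_config split_config) is used for a given run.

    # Hedged sketch of the JSON-serialized example_gen_pb2 configs described above.
    import json

    # example_gen_pb2.Input with one BigQuery query per split (overrides `query`).
    input_config = json.dumps({
        'splits': [
            {'name': 'train',
             'pattern': 'SELECT * FROM `my_project.my_dataset.my_table` '
                        'WHERE MOD(FARM_FINGERPRINT(key), 4) < 3'},
            {'name': 'eval',
             'pattern': 'SELECT * FROM `my_project.my_dataset.my_table` '
                        'WHERE MOD(FARM_FINGERPRINT(key), 4) = 3'},
        ]
    })

    # example_gen_pb2.Output overriding the default 2:1 train/eval split ratio.
    output_config = json.dumps({
        'split_config': {
            'splits': [
                {'name': 'train', 'hash_buckets': 3},
                {'name': 'eval', 'hash_buckets': 1},
            ]
        }
    })
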
diff --git a/components/tfx/ExampleGen/BigQueryExampleGen/component.yaml b/components/tfx/ExampleGen/BigQueryExampleGen/component.yaml
new file mode 100644
index 00000000000..f0c8e76b3a6
--- /dev/null
+++ b/components/tfx/ExampleGen/BigQueryExampleGen/component.yaml
@@ -0,0 +1,193 @@
+name: Bigqueryexamplegen
+description: |
+ Official TFX BigQueryExampleGen component.
+
+ The BigQuery examplegen component takes a query, and generates train
+  and eval examples for downstream components.
+
+
+ Args:
+ query: BigQuery sql string, query result will be treated as a single
+ split, can be overwritten by input_config.
+ input_config: An example_gen_pb2.Input instance with Split.pattern as
+ BigQuery sql string. If set, it overwrites the 'query' arg, and allows
+ different queries per split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ Returns:
+ example_artifacts: Optional channel of 'ExamplesPath' for output train and
+ eval examples.
+
+ Raises:
+ RuntimeError: Only one of query and input_config should be set.
+inputs:
+- name: query
+ type: String
+ optional: true
+- name: input_config
+ type: 'JsonObject: example_gen_pb2.Input'
+ optional: true
+- name: output_config
+ type: 'JsonObject: example_gen_pb2.Output'
+ optional: true
+outputs:
+- name: example_artifacts
+ type: Examples
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def BigQueryExampleGen(
+ example_artifacts_path: OutputPath('Examples'),
+
+ query: str = None,
+ input_config: 'JsonObject: example_gen_pb2.Input' = None,
+ output_config: 'JsonObject: example_gen_pb2.Output' = None,
+ ):
+ """
+ Official TFX BigQueryExampleGen component.
+
+ The BigQuery examplegen component takes a query, and generates train
+          and eval examples for downstream components.
+
+ Args:
+ query: BigQuery sql string, query result will be treated as a single
+ split, can be overwritten by input_config.
+ input_config: An example_gen_pb2.Input instance with Split.pattern as
+ BigQuery sql string. If set, it overwrites the 'query' arg, and allows
+ different queries per split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ Returns:
+ example_artifacts: Optional channel of 'ExamplesPath' for output train and
+ eval examples.
+
+ Raises:
+ RuntimeError: Only one of query and input_config should be set.
+ """
+          from tfx.components.example_gen.big_query_example_gen.component import BigQueryExampleGen
+ component_class = BigQueryExampleGen
+ input_channels_with_splits = {}
+ output_channels_with_splits = {'example_artifacts'}
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Bigqueryexamplegen', description="Official TFX BigQueryExampleGen component.\n\n The BigQuery examplegen component takes a query, and generates train\n and eval examples for downsteam components.\n\n\n Args:\n query: BigQuery sql string, query result will be treated as a single\n split, can be overwritten by input_config.\n input_config: An example_gen_pb2.Input instance with Split.pattern as\n BigQuery sql string. If set, it overwrites the 'query' arg, and allows\n different queries per split.\n output_config: An example_gen_pb2.Output instance, providing output\n configuration. If unset, default splits will be 'train' and 'eval' with\n size 2:1.\n Returns:\n example_artifacts: Optional channel of 'ExamplesPath' for output train and\n eval examples.\n\n Raises:\n RuntimeError: Only one of query and input_config should be set.\n")
+ _parser.add_argument("--query", dest="query", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--input-config", dest="input_config", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--output-config", dest="output_config", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--example-artifacts", dest="example_artifacts_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = BigQueryExampleGen(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - if:
+ cond:
+ isPresent: query
+ then:
+ - --query
+ - inputValue: query
+ - if:
+ cond:
+ isPresent: input_config
+ then:
+ - --input-config
+ - inputValue: input_config
+ - if:
+ cond:
+ isPresent: output_config
+ then:
+ - --output-config
+ - inputValue: output_config
+ - --example-artifacts
+ - outputPath: example_artifacts
diff --git a/components/tfx/ExampleGen/CsvExampleGen/component.py b/components/tfx/ExampleGen/CsvExampleGen/component.py
new file mode 100644
index 00000000000..e956b29a883
--- /dev/null
+++ b/components/tfx/ExampleGen/CsvExampleGen/component.py
@@ -0,0 +1,96 @@
+# flake8: noqa TODO
+
+from kfp.components import InputPath, OutputPath
+
+def CsvExampleGen(
+ # Inputs
+ input_base_path: InputPath('ExternalPath'),
+ #input_base_path: 'ExternalPath', # A Channel of 'ExternalPath' type, which includes one artifact whose uri is an external directory with csv files inside (required).
+
+ # Outputs
+ example_artifacts_path: OutputPath('Examples'),
+ #example_artifacts_path: 'ExamplesPath',
+
+ # Execution properties
+ #input_config_splits: {'List' : {'item_type': 'ExampleGen.Input.Split'}},
+ input_config: 'ExampleGen.Input' = None, # = '{"splits": []}', # JSON-serialized example_gen_pb2.Input instance, providing input configuration. If unset, the files under input_base will be treated as a single split.
+ #output_config_splits: {'List' : {'item_type': 'ExampleGen.SplitConfig'}},
+ output_config: 'ExampleGen.Output' = None, # = '{"splitConfig": {"splits": []}}', # JSON-serialized example_gen_pb2.Output instance, providing output configuration. If unset, default splits will be 'train' and 'eval' with size 2:1.
+ #custom_config: 'ExampleGen.CustomConfig' = None,
+):
+ """Executes the CsvExampleGen component.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with csv files inside (required).
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ ??? input: Forwards compatibility alias for the 'input_base' argument.
+ Returns:
+ example_artifacts: Artifact of type 'Examples' for output train and
+ eval examples.
+ """
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
+ from tfx.proto import example_gen_pb2
+ from tfx.types import standard_artifacts
+ from tfx.types import channel_utils
+
+ # Create input dict.
+ input_base = standard_artifacts.ExternalArtifact()
+ input_base.uri = input_base_path
+ input_base_channel = channel_utils.as_channel([input_base])
+
+ input_config_obj = None
+ if input_config:
+ input_config_obj = example_gen_pb2.Input()
+ json_format.Parse(input_config, input_config_obj)
+
+ output_config_obj = None
+ if output_config:
+ output_config_obj = example_gen_pb2.Output()
+ json_format.Parse(output_config, output_config_obj)
+
+ component_class_instance = CsvExampleGen(
+ input=input_base_channel,
+ input_config=input_config_obj,
+ output_config=output_config_obj,
+ )
+
+ # component_class_instance.inputs/outputs are wrappers that do not behave like real dictionaries. The underlying dict can be accessed using .get_all()
+ # Channel artifacts can be accessed by calling .get()
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for output_artifact in output_dict['examples']:
+ output_artifact.uri = example_artifacts_path
+ if output_artifact.split:
+ output_artifact.uri = os.path.join(output_artifact.uri, output_artifact.split)
+
+ print('component instance: ' + str(component_class_instance))
+
+ executor = CsvExampleGen.EXECUTOR_SPEC.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ CsvExampleGen,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
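
A hedged sketch of wiring the file-based CsvExampleGen component above into a KFP pipeline; the component.yaml path, the helper step, and the CSV contents are assumptions used only to show how the input_base directory artifact is produced and consumed.

    # Hypothetical pipeline using the CsvExampleGen component above.
    from kfp import components, dsl

    csv_example_gen_op = components.load_component_from_file(
        'components/tfx/ExampleGen/CsvExampleGen/component.yaml')  # path is an assumption

    def make_csv_dir(output_dir_path: components.OutputPath('ExternalPath')):
        # Hypothetical upstream step that writes CSV files into the directory
        # that CsvExampleGen will read as its `input_base` artifact.
        import os
        os.makedirs(output_dir_path, exist_ok=True)
        with open(os.path.join(output_dir_path, 'data.csv'), 'w') as f:
            f.write('x,y\n1,2\n3,4\n')

    make_csv_dir_op = components.func_to_container_op(make_csv_dir)

    @dsl.pipeline(name='csv-example-gen-demo')
    def demo():
        csv_dir_task = make_csv_dir_op()
        csv_example_gen_op(input_base=csv_dir_task.output)
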
diff --git a/components/tfx/ExampleGen/CsvExampleGen/component.yaml b/components/tfx/ExampleGen/CsvExampleGen/component.yaml
new file mode 100644
index 00000000000..b5a3c52a8d2
--- /dev/null
+++ b/components/tfx/ExampleGen/CsvExampleGen/component.yaml
@@ -0,0 +1,172 @@
+name: CsvExampleGen
+inputs:
+- {name: input_base, type: ExternalPath}
+- {name: input_config, optional: true, type: ExampleGen.Input}
+- {name: output_config, optional: true, type: ExampleGen.Output}
+outputs:
+- {name: example_artifacts, type: Examples}
+description: |
+ Executes the CsvExampleGen component.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with csv files inside (required).
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ ??? input: Forwards compatibility alias for the 'input_base' argument.
+ Returns:
+ example_artifacts: Artifact of type 'Examples' for output train and
+ eval examples.
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ class InputPath:
+ '''When creating component from function, InputPath should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def CsvExampleGen(
+ # Inputs
+ input_base_path: InputPath('ExternalPath'),
+ #input_base_path: 'ExternalPath', # A Channel of 'ExternalPath' type, which includes one artifact whose uri is an external directory with csv files inside (required).
+
+ # Outputs
+ example_artifacts_path: OutputPath('ExamplesPath'),
+ #example_artifacts_path: 'ExamplesPath',
+
+ # Execution properties
+ #input_config_splits: {'List' : {'item_type': 'ExampleGen.Input.Split'}},
+ input_config: 'ExampleGen.Input' = None, # = '{"splits": []}', # JSON-serialized example_gen_pb2.Input instance, providing input configuration. If unset, the files under input_base will be treated as a single split.
+ #output_config_splits: {'List' : {'item_type': 'ExampleGen.SplitConfig'}},
+ output_config: 'ExampleGen.Output' = None, # = '{"splitConfig": {"splits": []}}', # JSON-serialized example_gen_pb2.Output instance, providing output configuration. If unset, default splits will be 'train' and 'eval' with size 2:1.
+ #custom_config: 'ExampleGen.CustomConfig' = None,
+ ):
+ """\
+ Executes the CsvExampleGen component.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with csv files inside (required).
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ ??? input: Forwards compatibility alias for the 'input_base' argument.
+ Returns:
+ example_artifacts: Artifact of type 'ExamplesPath' for output train and
+ eval examples.
+ """
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
+ from tfx.proto import example_gen_pb2
+ from tfx.types import standard_artifacts
+ from tfx.types import channel_utils
+
+ # Create input dict.
+ input_base = standard_artifacts.ExternalArtifact()
+ input_base.uri = input_base_path
+ input_base_channel = channel_utils.as_channel([input_base])
+
+ input_config_obj = None
+ if input_config:
+ input_config_obj = example_gen_pb2.Input()
+ json_format.Parse(input_config, input_config_obj)
+
+ output_config_obj = None
+ if output_config:
+ output_config_obj = example_gen_pb2.Output()
+ json_format.Parse(output_config, output_config_obj)
+
+ component_class_instance = CsvExampleGen(
+ input=input_base_channel,
+ input_config=input_config_obj,
+ output_config=output_config_obj,
+ )
+
+ # component_class_instance.inputs/outputs are wrappers that do not behave like real dictionaries. The underlying dict can be accessed using .get_all()
+ # Channel artifacts can be accessed by calling .get()
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for output_artifact in output_dict['examples']:
+ output_artifact.uri = example_artifacts_path
+ if output_artifact.split:
+ output_artifact.uri = os.path.join(output_artifact.uri, output_artifact.split)
+
+ print('component instance: ' + str(component_class_instance))
+
+ executor = CsvExampleGen.EXECUTOR_SPEC.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Csvexamplegen', description="Executes the CsvExampleGen component.\n\n Args:\n input_base: A Channel of 'ExternalPath' type, which includes one artifact\n whose uri is an external directory with csv files inside (required).\n input_config: An example_gen_pb2.Input instance, providing input\n configuration. If unset, the files under input_base will be treated as a\n single split.\n output_config: An example_gen_pb2.Output instance, providing output\n configuration. If unset, default splits will be 'train' and 'eval' with\n size 2:1.\n ??? input: Forwards compatibility alias for the 'input_base' argument.\n Returns:\n example_artifacts: Artifact of type 'ExamplesPath' for output train and\n eval examples.\n")
+ _parser.add_argument("--input-base", dest="input_base_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--input-config", dest="input_config", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--output-config", dest="output_config", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--example-artifacts", dest="example_artifacts_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = CsvExampleGen(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --input-base
+ - {inputPath: input_base}
+ - if:
+ cond: {isPresent: input_config}
+ then:
+ - --input-config
+ - {inputValue: input_config}
+ - if:
+ cond: {isPresent: output_config}
+ then:
+ - --output-config
+ - {inputValue: output_config}
+ - --example-artifacts
+ - {outputPath: example_artifacts}
diff --git a/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.py b/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.py
new file mode 100644
index 00000000000..8e78dd05bfb
--- /dev/null
+++ b/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.py
@@ -0,0 +1,98 @@
+# flake8: noqa TODO
+
+from typing import NamedTuple
+
+def CsvExampleGen_GCS( #
+ # Inputs
+ #input_base_path: InputPath('ExternalPath'),
+ input_base_path: 'ExternalPath', # A Channel of 'ExternalPath' type, which includes one artifact whose uri is an external directory with csv files inside (required).
+
+ # Outputs
+ #example_artifacts_path: OutputPath('ExamplesPath'),
+ example_artifacts_path: 'ExamplesPath',
+
+ # Execution properties
+ #input_config_splits: {'List' : {'item_type': 'ExampleGen.Input.Split'}},
+ input_config: 'ExampleGen.Input' = None, # = '{"splits": []}', # JSON-serialized example_gen_pb2.Input instance, providing input configuration. If unset, the files under input_base will be treated as a single split.
+ #output_config_splits: {'List' : {'item_type': 'ExampleGen.SplitConfig'}},
+ output_config: 'ExampleGen.Output' = None, # = '{"splitConfig": {"splits": []}}', # JSON-serialized example_gen_pb2.Output instance, providing output configuration. If unset, default splits will be 'train' and 'eval' with size 2:1.
+ #custom_config: 'ExampleGen.CustomConfig' = None,
+) -> NamedTuple('Outputs', [
+ ('example_artifacts', 'ExamplesPath'),
+]):
+ """Executes the CsvExampleGen component.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with csv files inside (required).
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ ??? input: Forwards compatibility alias for the 'input_base' argument.
+ Returns:
+ example_artifacts: Artifact of type 'ExamplesPath' for output train and
+ eval examples.
+ """
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
+ from tfx.proto import example_gen_pb2
+ from tfx.types import standard_artifacts
+ from tfx.types import channel_utils
+
+ # Create input dict.
+ input_base = standard_artifacts.ExternalArtifact()
+ input_base.uri = input_base_path
+ input_base_channel = channel_utils.as_channel([input_base])
+
+ input_config_obj = None
+ if input_config:
+ input_config_obj = example_gen_pb2.Input()
+ json_format.Parse(input_config, input_config_obj)
+
+ output_config_obj = None
+ if output_config:
+ output_config_obj = example_gen_pb2.Output()
+ json_format.Parse(output_config, output_config_obj)
+
+ component_class_instance = CsvExampleGen(
+ input=input_base_channel,
+ input_config=input_config_obj,
+ output_config=output_config_obj,
+ )
+
+ # component_class_instance.inputs/outputs are wrappers that do not behave like real dictionaries. The underlying dict can be accessed using .get_all()
+ # Channel artifacts can be accessed by calling .get()
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for output_artifact in output_dict['examples']:
+ output_artifact.uri = example_artifacts_path
+ if output_artifact.split:
+ output_artifact.uri = os.path.join(output_artifact.uri, output_artifact.split)
+
+ print('component instance: ' + str(component_class_instance))
+
+ executor = CsvExampleGen.EXECUTOR_SPEC.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ return (example_artifacts_path,)
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ CsvExampleGen_GCS,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
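
A hedged sketch for the URI-based variant above: artifact locations are passed and returned as plain URI strings rather than being uploaded and downloaded by the pipeline system. The bucket paths and the component.yaml location are placeholders.

    # Hypothetical pipeline using the CsvExampleGen_GCS component above.
    from kfp import components, dsl

    csv_example_gen_gcs_op = components.load_component_from_file(
        'components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.yaml')  # assumption

    @dsl.pipeline(name='csv-example-gen-uri-demo')
    def demo(input_base_uri: str = 'gs://my-bucket/csv-data/',
             output_examples_uri: str = 'gs://my-bucket/examples/'):
        # The component reads CSVs directly from input_base_uri, writes the
        # train/eval splits under output_examples_uri, and also returns that
        # URI as the `example_artifacts` output for downstream steps.
        csv_example_gen_gcs_op(
            input_base_path=input_base_uri,
            example_artifacts_path=output_examples_uri,
        )
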
diff --git a/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.yaml b/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.yaml
new file mode 100644
index 00000000000..54a6868ad67
--- /dev/null
+++ b/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.yaml
@@ -0,0 +1,166 @@
+name: CsvExampleGen GCS
+inputs:
+- {name: input_base_path, type: ExternalPath}
+- {name: example_artifacts_path, type: ExamplesPath}
+- {name: input_config, optional: true, type: ExampleGen.Input}
+- {name: output_config, optional: true, type: ExampleGen.Output}
+outputs:
+- {name: example_artifacts, type: ExamplesPath}
+description: |
+ Executes the CsvExampleGen component.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with csv files inside (required).
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ ??? input: Forwards compatibility alias for the 'input_base' argument.
+ Returns:
+ example_artifacts: Artifact of type 'ExamplesPath' for output train and
+ eval examples.
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ from typing import NamedTuple
+
+ def CsvExampleGen_GCS( #
+ # Inputs
+ #input_base_path: InputPath('ExternalPath'),
+ input_base_path: 'ExternalPath', # A Channel of 'ExternalPath' type, which includes one artifact whose uri is an external directory with csv files inside (required).
+
+ # Outputs
+ #example_artifacts_path: OutputPath('ExamplesPath'),
+ example_artifacts_path: 'ExamplesPath',
+
+ # Execution properties
+ #input_config_splits: {'List' : {'item_type': 'ExampleGen.Input.Split'}},
+ input_config: 'ExampleGen.Input' = None, # = '{"splits": []}', # JSON-serialized example_gen_pb2.Input instance, providing input configuration. If unset, the files under input_base will be treated as a single split.
+ #output_config_splits: {'List' : {'item_type': 'ExampleGen.SplitConfig'}},
+ output_config: 'ExampleGen.Output' = None, # = '{"splitConfig": {"splits": []}}', # JSON-serialized example_gen_pb2.Output instance, providing output configuration. If unset, default splits will be 'train' and 'eval' with size 2:1.
+ #custom_config: 'ExampleGen.CustomConfig' = None,
+ ) -> NamedTuple('Outputs', [
+ ('example_artifacts', 'ExamplesPath'),
+ ]):
+ """Executes the CsvExampleGen component.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with csv files inside (required).
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ ??? input: Forwards compatibility alias for the 'input_base' argument.
+ Returns:
+ example_artifacts: Artifact of type 'ExamplesPath' for output train and
+ eval examples.
+ """
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
+ from tfx.proto import example_gen_pb2
+ from tfx.types import standard_artifacts
+ from tfx.types import channel_utils
+
+ # Create input dict.
+ input_base = standard_artifacts.ExternalArtifact()
+ input_base.uri = input_base_path
+ input_base_channel = channel_utils.as_channel([input_base])
+
+ input_config_obj = None
+ if input_config:
+ input_config_obj = example_gen_pb2.Input()
+ json_format.Parse(input_config, input_config_obj)
+
+ output_config_obj = None
+ if output_config:
+ output_config_obj = example_gen_pb2.Output()
+ json_format.Parse(output_config, output_config_obj)
+
+ component_class_instance = CsvExampleGen(
+ input=input_base_channel,
+ input_config=input_config_obj,
+ output_config=output_config_obj,
+ )
+
+ # component_class_instance.inputs/outputs are wrappers that do not behave like real dictionaries. The underlying dict can be accessed using .get_all()
+ # Channel artifacts can be accessed by calling .get()
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for output_artifact in output_dict['examples']:
+ output_artifact.uri = example_artifacts_path
+ if output_artifact.split:
+ output_artifact.uri = os.path.join(output_artifact.uri, output_artifact.split)
+
+ print('component instance: ' + str(component_class_instance))
+
+ executor = CsvExampleGen.EXECUTOR_SPEC.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ return (example_artifacts_path,)
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Csvexamplegen gcs', description="Executes the CsvExampleGen component.\n\n Args:\n input_base: A Channel of 'ExternalPath' type, which includes one artifact\n whose uri is an external directory with csv files inside (required).\n input_config: An example_gen_pb2.Input instance, providing input\n configuration. If unset, the files under input_base will be treated as a\n single split.\n output_config: An example_gen_pb2.Output instance, providing output\n configuration. If unset, default splits will be 'train' and 'eval' with\n size 2:1.\n ??? input: Forwards compatibility alias for the 'input_base' argument.\n Returns:\n example_artifacts: Artifact of type 'ExamplesPath' for output train and\n eval examples.\n")
+ _parser.add_argument("--input-base-path", dest="input_base_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--example-artifacts-path", dest="example_artifacts_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--input-config", dest="input_config", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--output-config", dest="output_config", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = CsvExampleGen_GCS(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+ str,
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --input-base-path
+ - {inputValue: input_base_path}
+ - --example-artifacts-path
+ - {inputValue: example_artifacts_path}
+ - if:
+ cond: {isPresent: input_config}
+ then:
+ - --input-config
+ - {inputValue: input_config}
+ - if:
+ cond: {isPresent: output_config}
+ then:
+ - --output-config
+ - {inputValue: output_config}
+ - '----output-paths'
+ - {outputPath: example_artifacts}
diff --git a/components/tfx/ExampleGen/ImportExampleGen/component.py b/components/tfx/ExampleGen/ImportExampleGen/component.py
new file mode 100644
index 00000000000..02c4dac8a8f
--- /dev/null
+++ b/components/tfx/ExampleGen/ImportExampleGen/component.py
@@ -0,0 +1,112 @@
+# flake8: noqa TODO
+
+from kfp.components import InputPath, OutputPath
+
+
+def ImportExampleGen(
+ input_base_path: InputPath('ExternalPath'),
+ #input_path: InputPath('ExternalPath'),
+
+ example_artifacts_path: OutputPath('Examples'),
+
+ input_config: 'JsonObject: example_gen_pb2.Input' = None,
+ output_config: 'JsonObject: example_gen_pb2.Output' = None,
+):
+ """
+ Official TFX ImportExampleGen component.
+
+ The ImportExampleGen component takes TFRecord files with TF Example data
+    format, and generates train and eval examples for downstream components.
+    This component provides consistent and configurable partitioning, and it
+    also shuffles the dataset for ML best practice.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with TFRecord files inside
+ (required).
+ #input: Forwards compatibility alias for the 'input_base' argument.
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ Returns:
+ example_artifacts: Optional channel of 'ExamplesPath' for output train and
+ eval examples.
+
+ Raises:
+ RuntimeError: Only one of query and input_config should be set.
+ """
+ from tfx.components.example_gen.import_example_gen.component import ImportExampleGen
+ component_class = ImportExampleGen
+ input_channels_with_splits = {}
+ output_channels_with_splits = {'example_artifacts'}
+
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
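+    # Execution parameters typed as protobuf messages arrive as JSON strings;
+    # the loop below parses them into the corresponding message objects.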
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ ImportExampleGen,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
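+
+# A minimal usage sketch (illustration only, not part of the component): the
+# generated component.yaml can be loaded with the stock KFP loader and chained
+# with the other TFX wrapper components in this directory tree, e.g.:
+#
+#   from kfp import components, dsl
+#
+#   import_example_gen_op = components.load_component_from_file('component.yaml')
+#   statistics_gen_op = components.load_component_from_file('../../StatisticsGen/component.yaml')
+#
+#   @dsl.pipeline(name='tfx-oss-demo')
+#   def tfx_pipeline():
+#       examples_task = import_example_gen_op(input_base=...)  # directory of TFRecord files
+#       statistics_gen_op(input_data=examples_task.outputs['example_artifacts'])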
diff --git a/components/tfx/ExampleGen/ImportExampleGen/component.yaml b/components/tfx/ExampleGen/ImportExampleGen/component.yaml
new file mode 100644
index 00000000000..8328e455b50
--- /dev/null
+++ b/components/tfx/ExampleGen/ImportExampleGen/component.yaml
@@ -0,0 +1,202 @@
+name: Importexamplegen
+description: |
+ Official TFX ImportExampleGen component.
+
+ The ImportExampleGen component takes TFRecord files with TF Example data
+  format, and generates train and eval examples for downstream components.
+  This component provides consistent and configurable partitioning, and it
+  also shuffles the dataset for ML best practice.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with TFRecord files inside
+ (required).
+ #input: Forwards compatibility alias for the 'input_base' argument.
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ Returns:
+ example_artifacts: Optional channel of 'ExamplesPath' for output train and
+ eval examples.
+
+ Raises:
+ RuntimeError: Only one of query and input_config should be set.
+inputs:
+- name: input_base
+ type: ExternalPath
+- name: input_config
+ type: 'JsonObject: example_gen_pb2.Input'
+ optional: true
+- name: output_config
+ type: 'JsonObject: example_gen_pb2.Output'
+ optional: true
+outputs:
+- name: example_artifacts
+ type: Examples
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ class InputPath:
+ '''When creating component from function, InputPath should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def ImportExampleGen(
+ input_base_path: InputPath('ExternalPath'),
+ #input_path: InputPath('ExternalPath'),
+
+ example_artifacts_path: OutputPath('Examples'),
+
+ input_config: 'JsonObject: example_gen_pb2.Input' = None,
+ output_config: 'JsonObject: example_gen_pb2.Output' = None,
+ ):
+ """
+ Official TFX ImportExampleGen component.
+
+ The ImportExampleGen component takes TFRecord files with TF Example data
+ format, and generates train and eval examples for downsteam components.
+ This component provides consistent and configurable partition, and it also
+ shuffle the dataset for ML best practice.
+
+ Args:
+ input_base: A Channel of 'ExternalPath' type, which includes one artifact
+ whose uri is an external directory with TFRecord files inside
+ (required).
+ #input: Forwards compatibility alias for the 'input_base' argument.
+ input_config: An example_gen_pb2.Input instance, providing input
+ configuration. If unset, the files under input_base will be treated as a
+ single split.
+ output_config: An example_gen_pb2.Output instance, providing output
+ configuration. If unset, default splits will be 'train' and 'eval' with
+ size 2:1.
+ Returns:
+ example_artifacts: Optional channel of 'ExamplesPath' for output train and
+ eval examples.
+
+ Raises:
+ RuntimeError: Only one of query and input_config should be set.
+ """
+ from tfx.components.example_gen.import_example_gen.component import ImportExampleGen
+ component_class = ImportExampleGen
+ input_channels_with_splits = {}
+ output_channels_with_splits = {'example_artifacts'}
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Importexamplegen', description="Official TFX ImportExampleGen component.\n\n The ImportExampleGen component takes TFRecord files with TF Example data\n format, and generates train and eval examples for downsteam components.\n This component provides consistent and configurable partition, and it also\n shuffle the dataset for ML best practice.\n\n Args:\n input_base: A Channel of 'ExternalPath' type, which includes one artifact\n whose uri is an external directory with TFRecord files inside\n (required).\n #input: Forwards compatibility alias for the 'input_base' argument.\n input_config: An example_gen_pb2.Input instance, providing input\n configuration. If unset, the files under input_base will be treated as a\n single split.\n output_config: An example_gen_pb2.Output instance, providing output\n configuration. If unset, default splits will be 'train' and 'eval' with\n size 2:1.\n Returns:\n example_artifacts: Optional channel of 'ExamplesPath' for output train and\n eval examples.\n\n Raises:\n RuntimeError: Only one of query and input_config should be set.\n")
+ _parser.add_argument("--input-base", dest="input_base_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--input-config", dest="input_config", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--output-config", dest="output_config", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--example-artifacts", dest="example_artifacts_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = ImportExampleGen(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --input-base
+ - inputPath: input_base
+ - if:
+ cond:
+ isPresent: input_config
+ then:
+ - --input-config
+ - inputValue: input_config
+ - if:
+ cond:
+ isPresent: output_config
+ then:
+ - --output-config
+ - inputValue: output_config
+ - --example-artifacts
+ - outputPath: example_artifacts
diff --git a/components/tfx/ExampleValidator/component.py b/components/tfx/ExampleValidator/component.py
new file mode 100644
index 00000000000..3d1f350d55b
--- /dev/null
+++ b/components/tfx/ExampleValidator/component.py
@@ -0,0 +1,117 @@
+from kfp.components import InputPath, OutputPath
+
+
+def ExampleValidator(
+ stats_path: InputPath('ExampleStatistics'),
+ #statistics_path: InputPath('ExampleStatistics'),
+ schema_path: InputPath('Schema'),
+
+ output_path: OutputPath('ExampleValidation'),
+):
+ """
+ A TFX component to validate input examples.
+
+ The ExampleValidator component uses [Tensorflow Data
+ Validation](https://www.tensorflow.org/tfx/data_validation) to
+ validate the statistics of some splits on input examples against a schema.
+
+ The ExampleValidator component identifies anomalies in training and serving
+ data. The component can be configured to detect different classes of anomalies
+ in the data. It can:
+ - perform validity checks by comparing data statistics against a schema that
+ codifies expectations of the user.
+ - detect data drift by looking at a series of data.
+ - detect changes in dataset-wide data (i.e., num_examples) across spans or
+ versions.
+
+ Schema Based Example Validation
+ The ExampleValidator component identifies any anomalies in the example data by
+ comparing data statistics computed by the StatisticsGen component against a
+ schema. The schema codifies properties which the input data is expected to
+ satisfy, and is provided and maintained by the user.
+
+ Please see https://www.tensorflow.org/tfx/data_validation for more details.
+
+ Args:
+    stats: A Channel of 'ExampleStatisticsPath' type. This should contain at
+      least the 'eval' split. Other splits are currently ignored. Will be
+      deprecated in the future in favor of the `statistics` parameter.
+    #statistics: Future replacement of the 'stats' argument.
+    schema: A Channel of 'SchemaPath' type. _required_
+ Returns:
+ output: Output channel of 'ExampleValidationPath' type.
+
+ Either `stats` or `statistics` must be present in the arguments.
+ """
+ from tfx.components.example_validator.component import ExampleValidator
+ component_class = ExampleValidator
+ input_channels_with_splits = {'stats', 'statistics'}
+ output_channels_with_splits = {}
+
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ ExampleValidator,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
diff --git a/components/tfx/ExampleValidator/component.yaml b/components/tfx/ExampleValidator/component.yaml
new file mode 100644
index 00000000000..0c7036a2ef3
--- /dev/null
+++ b/components/tfx/ExampleValidator/component.yaml
@@ -0,0 +1,203 @@
+name: Examplevalidator
+description: |
+ A TFX component to validate input examples.
+
+ The ExampleValidator component uses [Tensorflow Data
+ Validation](https://www.tensorflow.org/tfx/data_validation) to
+ validate the statistics of some splits on input examples against a schema.
+
+ The ExampleValidator component identifies anomalies in training and serving
+ data. The component can be configured to detect different classes of anomalies
+ in the data. It can:
+ - perform validity checks by comparing data statistics against a schema that
+ codifies expectations of the user.
+ - detect data drift by looking at a series of data.
+ - detect changes in dataset-wide data (i.e., num_examples) across spans or
+ versions.
+
+ Schema Based Example Validation
+ The ExampleValidator component identifies any anomalies in the example data by
+ comparing data statistics computed by the StatisticsGen component against a
+ schema. The schema codifies properties which the input data is expected to
+ satisfy, and is provided and maintained by the user.
+
+ Please see https://www.tensorflow.org/tfx/data_validation for more details.
+
+ Args:
+    stats: A Channel of 'ExampleStatisticsPath' type. This should contain at
+      least the 'eval' split. Other splits are currently ignored. Will be
+      deprecated in the future in favor of the `statistics` parameter.
+    #statistics: Future replacement of the 'stats' argument.
+    schema: A Channel of 'SchemaPath' type. _required_
+ Returns:
+ output: Output channel of 'ExampleValidationPath' type.
+
+ Either `stats` or `statistics` must be present in the arguments.
+inputs:
+- name: stats
+ type: ExampleStatistics
+- name: schema
+ type: Schema
+outputs:
+- name: output
+ type: ExampleValidation
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ class InputPath:
+ '''When creating component from function, InputPath should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def ExampleValidator(
+ stats_path: InputPath('ExampleStatistics'),
+ #statistics_path: InputPath('ExampleStatistics'),
+ schema_path: InputPath('Schema'),
+
+ output_path: OutputPath('ExampleValidation'),
+ ):
+ """
+ A TFX component to validate input examples.
+
+ The ExampleValidator component uses [Tensorflow Data
+ Validation](https://www.tensorflow.org/tfx/data_validation) to
+ validate the statistics of some splits on input examples against a schema.
+
+ The ExampleValidator component identifies anomalies in training and serving
+ data. The component can be configured to detect different classes of anomalies
+ in the data. It can:
+ - perform validity checks by comparing data statistics against a schema that
+ codifies expectations of the user.
+ - detect data drift by looking at a series of data.
+ - detect changes in dataset-wide data (i.e., num_examples) across spans or
+ versions.
+
+ Schema Based Example Validation
+ The ExampleValidator component identifies any anomalies in the example data by
+ comparing data statistics computed by the StatisticsGen component against a
+ schema. The schema codifies properties which the input data is expected to
+ satisfy, and is provided and maintained by the user.
+
+ Please see https://www.tensorflow.org/tfx/data_validation for more details.
+
+ Args:
+ stats: A Channel of 'ExampleStatisticsPath` type. This should contain at
+ least 'eval' split. Other splits are ignored currently. Will be
+ deprecated in the future for the `statistics` parameter.
+ #statistics: Future replacement of the 'stats' argument.
+ schema: A Channel of "SchemaPath' type. _required_
+ Returns:
+ output: Output channel of 'ExampleValidationPath' type.
+
+ Either `stats` or `statistics` must be present in the arguments.
+ """
+ from tfx.components.example_validator.component import ExampleValidator
+ component_class = ExampleValidator
+ input_channels_with_splits = {'stats', 'statistics'}
+ output_channels_with_splits = {}
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Examplevalidator', description='A TFX component to validate input examples.\n\n The ExampleValidator component uses [Tensorflow Data\n Validation](https://www.tensorflow.org/tfx/data_validation) to\n validate the statistics of some splits on input examples against a schema.\n\n The ExampleValidator component identifies anomalies in training and serving\n data. The component can be configured to detect different classes of anomalies\n in the data. It can:\n - perform validity checks by comparing data statistics against a schema that\n codifies expectations of the user.\n - detect data drift by looking at a series of data.\n - detect changes in dataset-wide data (i.e., num_examples) across spans or\n versions.\n\n Schema Based Example Validation\n The ExampleValidator component identifies any anomalies in the example data by\n comparing data statistics computed by the StatisticsGen component against a\n schema. The schema codifies properties which the input data is expected to\n satisfy, and is provided and maintained by the user.\n\n Please see https://www.tensorflow.org/tfx/data_validation for more details.\n\n Args:\n stats: A Channel of \'ExampleStatisticsPath` type. This should contain at\n least \'eval\' split. Other splits are ignored currently. Will be\n deprecated in the future for the `statistics` parameter.\n #statistics: Future replacement of the \'stats\' argument.\n schema: A Channel of "SchemaPath\' type. _required_\n Returns:\n output: Output channel of \'ExampleValidationPath\' type.\n\n Either `stats` or `statistics` must be present in the arguments.\n')
+ _parser.add_argument("--stats", dest="stats_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--schema", dest="schema_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--output", dest="output_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = ExampleValidator(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --stats
+ - inputPath: stats
+ - --schema
+ - inputPath: schema
+ - --output
+ - outputPath: output
diff --git a/components/tfx/SchemaGen/component.py b/components/tfx/SchemaGen/component.py
new file mode 100644
index 00000000000..ba630436e5c
--- /dev/null
+++ b/components/tfx/SchemaGen/component.py
@@ -0,0 +1,80 @@
+from kfp.components import InputPath, OutputPath
+
+
+def SchemaGen(
+ stats_path: InputPath('ExampleStatistics'),
+ #statistics_path: InputPath('ExampleStatistics'),
+ output_path: OutputPath('Schema'),
+ #schema_path: InputPath('Schema') = None,
+ infer_feature_shape: bool = False,
+):
+ """Constructs a SchemaGen component.
+
+ Args:
+ stats: A Channel of `ExampleStatistics` type (required if spec is not
+ passed). This should contain at least a `train` split. Other splits are
+ currently ignored.
+ # Exactly one of 'stats'/'statistics' or 'schema' is required.
+ #schema: A Channel of `Schema` type that provides an instance of Schema.
+ # If provided, pass through this schema artifact as the output. Exactly
+ # one of 'stats'/'statistics' or 'schema' is required.
+ infer_feature_shape: Boolean value indicating whether or not to infer the
+      shape of features. If the feature shape is not inferred, the downstream
+      TensorFlow Transform component using the schema will parse input
+      as tf.SparseTensor.
+ #statistics: Future replacement of the 'stats' argument.
+ #Either `statistics` or `stats` must be present in the input arguments.
+ Returns:
+ output: Output `Schema` channel for schema result.
+ """
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from tfx.types import standard_artifacts
+ from tfx.types import channel_utils
+
+ # Create input dict.
+ input_base_path = stats_path
+ input_artifact_class = standard_artifacts.ExampleStatistics
+ # Recovering splits
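+    # Each immediate subdirectory of the statistics path is assumed to hold one
+    # split (e.g. 'train', 'eval'); the directory name becomes the artifact split.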
+ splits = sorted(os.listdir(input_base_path))
+ input_data_artifacts = []
+ for split in splits:
+ artifact = input_artifact_class()
+ artifact.split = split
+ artifact.uri = os.path.join(input_base_path, split) + '/'
+ input_data_artifacts.append(artifact)
+ input_data_channel = channel_utils.as_channel(input_data_artifacts)
+
+ from tfx.components.schema_gen.component import SchemaGen
+ component_class_instance = SchemaGen(
+ stats=input_data_channel,
+ )
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for output_artifact in output_dict['output']:
+ output_artifact.uri = os.path.join(output_path, output_artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+ #return (output_path,)
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ SchemaGen,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
diff --git a/components/tfx/SchemaGen/component.yaml b/components/tfx/SchemaGen/component.yaml
new file mode 100644
index 00000000000..dc0a5a32a5f
--- /dev/null
+++ b/components/tfx/SchemaGen/component.yaml
@@ -0,0 +1,156 @@
+name: Schemagen
+inputs:
+- {name: stats, type: ExampleStatistics}
+- {default: 'False', name: infer_feature_shape, optional: true, type: Boolean}
+outputs:
+- {name: output, type: Schema}
+description: |
+ Constructs a SchemaGen component.
+
+ Args:
+ stats: A Channel of `ExampleStatistics` type (required if spec is not
+ passed). This should contain at least a `train` split. Other splits are
+ currently ignored.
+ # Exactly one of 'stats'/'statistics' or 'schema' is required.
+ #schema: A Channel of `Schema` type that provides an instance of Schema.
+ # If provided, pass through this schema artifact as the output. Exactly
+ # one of 'stats'/'statistics' or 'schema' is required.
+ infer_feature_shape: Boolean value indicating whether or not to infer the
+      shape of features. If the feature shape is not inferred, the downstream
+      TensorFlow Transform component using the schema will parse input
+      as tf.SparseTensor.
+ #statistics: Future replacement of the 'stats' argument.
+ #Either `statistics` or `stats` must be present in the input arguments.
+ Returns:
+ output: Output `Schema` channel for schema result.
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ class InputPath:
+ '''When creating component from function, InputPath should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ def SchemaGen(
+ stats_path: InputPath('ExampleStatistics'),
+ #statistics_path: InputPath('ExampleStatistics'),
+ output_path: OutputPath('Schema'),
+ #schema_path: InputPath('Schema') = None,
+ infer_feature_shape: bool = False,
+ ):
+ """Constructs a SchemaGen component.
+
+ Args:
+ stats: A Channel of `ExampleStatistics` type (required if spec is not
+ passed). This should contain at least a `train` split. Other splits are
+ currently ignored.
+ # Exactly one of 'stats'/'statistics' or 'schema' is required.
+ #schema: A Channel of `Schema` type that provides an instance of Schema.
+ # If provided, pass through this schema artifact as the output. Exactly
+ # one of 'stats'/'statistics' or 'schema' is required.
+ infer_feature_shape: Boolean value indicating whether or not to infer the
+ shape of features. If the feature shape is not inferred, downstream
+ Tensorflow Transform component using the schema will parse input
+ as tf.SparseTensor.
+ #statistics: Future replacement of the 'stats' argument.
+ #Either `statistics` or `stats` must be present in the input arguments.
+ Returns:
+ output: Output `Schema` channel for schema result.
+ """
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from tfx.types import standard_artifacts
+ from tfx.types import channel_utils
+
+ # Create input dict.
+ input_base_path = stats_path
+ input_artifact_class = standard_artifacts.ExampleStatistics
+ # Recovering splits
+ splits = sorted(os.listdir(input_base_path))
+ input_data_artifacts = []
+ for split in splits:
+ artifact = input_artifact_class()
+ artifact.split = split
+ artifact.uri = os.path.join(input_base_path, split) + '/'
+ input_data_artifacts.append(artifact)
+ input_data_channel = channel_utils.as_channel(input_data_artifacts)
+
+ from tfx.components.schema_gen.component import SchemaGen
+ component_class_instance = SchemaGen(
+ stats=input_data_channel,
+ )
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for output_artifact in output_dict['output']:
+ output_artifact.uri = os.path.join(output_path, output_artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+ #return (output_path,)
+
+ def _deserialize_bool(s) -> bool:
+ from distutils.util import strtobool
+ return strtobool(s) == 1
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Schemagen', description="Constructs a SchemaGen component.\n\n Args:\n stats: A Channel of `ExampleStatistics` type (required if spec is not\n passed). This should contain at least a `train` split. Other splits are\n currently ignored.\n # Exactly one of 'stats'/'statistics' or 'schema' is required.\n #schema: A Channel of `Schema` type that provides an instance of Schema.\n # If provided, pass through this schema artifact as the output. Exactly\n # one of 'stats'/'statistics' or 'schema' is required.\n infer_feature_shape: Boolean value indicating whether or not to infer the\n shape of features. If the feature shape is not inferred, downstream\n Tensorflow Transform component using the schema will parse input\n as tf.SparseTensor.\n #statistics: Future replacement of the 'stats' argument.\n #Either `statistics` or `stats` must be present in the input arguments.\n Returns:\n output: Output `Schema` channel for schema result.\n")
+ _parser.add_argument("--stats", dest="stats_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--infer-feature-shape", dest="infer_feature_shape", type=_deserialize_bool, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--output", dest="output_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = SchemaGen(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --stats
+ - {inputPath: stats}
+ - if:
+ cond: {isPresent: infer_feature_shape}
+ then:
+ - --infer-feature-shape
+ - {inputValue: infer_feature_shape}
+ - --output
+ - {outputPath: output}
diff --git a/components/tfx/StatisticsGen/component.py b/components/tfx/StatisticsGen/component.py
new file mode 100644
index 00000000000..acdffed8eab
--- /dev/null
+++ b/components/tfx/StatisticsGen/component.py
@@ -0,0 +1,77 @@
+from kfp.components import InputPath, OutputPath
+
+
+def StatisticsGen(
+ # Inputs
+ input_data_path: InputPath('Examples'),
+ #input_data_path: 'ExamplesPath',
+
+ # Outputs
+ output_path: OutputPath('ExampleStatistics'),
+ #output_path: 'ExampleStatistics',
+):
+#) -> NamedTuple('Outputs', [
+# ('output', 'ExampleStatistics'),
+#]):
+ """Construct a StatisticsGen component.
+
+ Args:
+ input_data: A Channel of `Examples` type, likely generated by the
+ [ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
+ This needs to contain two splits labeled `train` and `eval`. _required_
+ # examples: Forwards compatibility alias for the `input_data` argument.
+ Returns:
+ output: `ExampleStatistics` channel for statistics of each split
+ provided in the input examples.
+ """
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from tfx.types import standard_artifacts
+ from tfx.types import channel_utils
+
+ # Create input dict.
+ input_base_path = input_data_path
+ input_artifact_class = standard_artifacts.Examples
+ # Recovering splits
+ splits = sorted(os.listdir(input_data_path))
+ input_data_artifacts = []
+ for split in splits:
+ artifact = input_artifact_class()
+ artifact.split = split
+ artifact.uri = os.path.join(input_base_path, split) + '/'
+ input_data_artifacts.append(artifact)
+ input_data_channel = channel_utils.as_channel(input_data_artifacts)
+
+ from tfx.components.statistics_gen.component import StatisticsGen
+ component_class_instance = StatisticsGen(
+ input_data=input_data_channel,
+ )
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for output_artifact in output_dict['output']:
+ output_artifact.uri = os.path.join(output_path, output_artifact.split) # Default split is ''
+
+ print('Component instance: ' + str(component_class_instance))
+
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+ #return (output_path,)
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ StatisticsGen,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
diff --git a/components/tfx/StatisticsGen/component.yaml b/components/tfx/StatisticsGen/component.yaml
new file mode 100644
index 00000000000..5d21d7d3d0f
--- /dev/null
+++ b/components/tfx/StatisticsGen/component.yaml
@@ -0,0 +1,136 @@
+name: Statisticsgen
+inputs:
+- name: input_data
+ type: Examples
+outputs:
+- name: output
+ type: ExampleStatistics
+description: |
+ Construct a StatisticsGen component.
+
+ Args:
+ input_data: A Channel of `ExamplesPath` type, likely generated by the
+ [ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
+ This needs to contain two splits labeled `train` and `eval`. _required_
+ # examples: Forwards compatibility alias for the `input_data` argument.
+ Returns:
+ output: `ExampleStatistics` channel for statistics of each split
+ provided in the input examples.
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ class InputPath:
+ '''When creating component from function, InputPath should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ def StatisticsGen(
+ # Inputs
+ input_data_path: InputPath('ExamplesPath'),
+ #input_data_path: 'ExamplesPath',
+
+ # Outputs
+ output_path: OutputPath('ExampleStatistics'),
+ #output_path: 'ExampleStatistics',
+ ):
+ #) -> NamedTuple('Outputs', [
+ # ('output', 'ExampleStatistics'),
+ #]):
+ """Construct a StatisticsGen component.
+
+ Args:
+ input_data: A Channel of `ExamplesPath` type, likely generated by the
+ [ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
+ This needs to contain two splits labeled `train` and `eval`. _required_
+ # examples: Forwards compatibility alias for the `input_data` argument.
+ Returns:
+ output: `ExampleStatistics` channel for statistics of each split
+ provided in the input examples.
+ """
+
+ import json
+ import os
+ from google.protobuf import json_format
+ from tfx.types import standard_artifacts
+ from tfx.types import channel_utils
+
+ # Create input dict.
+ input_base_path = input_data_path
+ input_artifact_class = standard_artifacts.Examples
+ # Recovering splits
+ splits = sorted(os.listdir(input_data_path))
+ input_data_artifacts = []
+ for split in splits:
+ artifact = input_artifact_class()
+ artifact.split = split
+ artifact.uri = os.path.join(input_base_path, split) + '/'
+ input_data_artifacts.append(artifact)
+ input_data_channel = channel_utils.as_channel(input_data_artifacts)
+
+ from tfx.components.statistics_gen.component import StatisticsGen
+ component_class_instance = StatisticsGen(
+ input_data=input_data_channel,
+ )
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for output_artifact in output_dict['output']:
+ output_artifact.uri = os.path.join(output_path, output_artifact.split) # Default split is ''
+
+ print('Component instance: ' + str(component_class_instance))
+
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+ #return (output_path,)
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Statisticsgen', description='Construct a StatisticsGen component.\n\n Args:\n input_data: A Channel of `ExamplesPath` type, likely generated by the\n [ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).\n This needs to contain two splits labeled `train` and `eval`. _required_\n # examples: Forwards compatibility alias for the `input_data` argument.\n Returns:\n output: `ExampleStatistics` channel for statistics of each split\n provided in the input examples.\n')
+ _parser.add_argument("--input-data", dest="input_data_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--output", dest="output_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = StatisticsGen(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --input-data
+ - inputPath: input_data
+ - --output
+ - outputPath: output
diff --git a/components/tfx/Trainer/component.py b/components/tfx/Trainer/component.py
new file mode 100644
index 00000000000..d5cc637aa5f
--- /dev/null
+++ b/components/tfx/Trainer/component.py
@@ -0,0 +1,173 @@
+# flake8: noqa TODO
+
+from kfp.components import InputPath, OutputPath
+
+
+def Trainer(
+ examples_path: InputPath('Examples'),
+ transform_output_path: InputPath('TransformGraph'), # ? = None
+ #transform_graph_path: InputPath('TransformGraph'),
+ schema_path: InputPath('Schema'),
+
+ output_path: OutputPath('Model'),
+
+ module_file: str = None,
+ trainer_fn: str = None,
+ train_args: 'JsonObject: tfx.proto.trainer_pb2.TrainArgs' = None,
+ eval_args: 'JsonObject: tfx.proto.trainer_pb2.EvalArgs' = None,
+ #custom_config: dict = None,
+ #custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
+):
+ """
+ A TFX component to train a TensorFlow model.
+
+ The Trainer component is used to train and eval a model using given inputs and
+ a user-supplied estimator. This component includes a custom driver to
+    optionally grab a previous model to warm-start from.
+
+ ## Providing an estimator
+ The TFX executor will use the estimator provided in the `module_file` file
+ to train the model. The Trainer executor will look specifically for the
+ `trainer_fn()` function within that file. Before training, the executor will
+ call that function expecting the following returned as a dictionary:
+
+ - estimator: The
+ [estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
+ to be used by TensorFlow to train the model.
+ - train_spec: The
+ [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)
+ to be used by the "train" part of the TensorFlow `train_and_evaluate()`
+ call.
+ - eval_spec: The
+ [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)
+ to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.
+ - eval_input_receiver_fn: The
+ [configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)
+ to be used
+ by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)
+ component when validating the model.
+
+ An example of `trainer_fn()` can be found in the [user-supplied
+    code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
+ of the TFX Chicago Taxi pipeline example.
+
+
+ Args:
+ examples: A Channel of 'ExamplesPath' type, serving as the source of
+ examples that are used in training (required). May be raw or
+ transformed.
+ transform_output: An optional Channel of 'TransformPath' type, serving as
+ the input transform graph if present.
+ #transform_graph: Forwards compatibility alias for the 'transform_output'
+ # argument.
+ schema: A Channel of 'SchemaPath' type, serving as the schema of training
+ and eval data.
+    module_file: A path to a python module file containing the UDF model definition.
+ The module_file must implement a function named `trainer_fn` at its
+ top level. The function must have the following signature.
+
+ def trainer_fn(tf.contrib.training.HParams,
+ tensorflow_metadata.proto.v0.schema_pb2) -> Dict:
+ ...
+
+ where the returned Dict has the following key-values.
+ 'estimator': an instance of tf.estimator.Estimator
+ 'train_spec': an instance of tf.estimator.TrainSpec
+ 'eval_spec': an instance of tf.estimator.EvalSpec
+ 'eval_input_receiver_fn': an instance of tfma.export.EvalInputReceiver
+
+ Exactly one of 'module_file' or 'trainer_fn' must be supplied.
+ trainer_fn: A python path to UDF model definition function. See
+ 'module_file' for the required signature of the UDF.
+ Exactly one of 'module_file' or 'trainer_fn' must be supplied.
+ train_args: A trainer_pb2.TrainArgs instance, containing args used for
+      training. Currently only num_steps is available.
+ eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
+      Currently only num_steps is available.
+ #custom_config: A dict which contains the training job parameters to be
+ # passed to Google Cloud ML Engine. For the full set of parameters
+ # supported by Google Cloud ML Engine, refer to
+ # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
+ #custom_executor_spec: Optional custom executor spec.
+ Returns:
+ output: Optional 'ModelExportPath' channel for result of exported models.
+ Raises:
+ ValueError:
+ - When both or neither of 'module_file' and 'trainer_fn' is supplied.
+ - When both or neither of 'examples' and 'transformed_examples'
+ is supplied.
+ - When 'transformed_examples' is supplied but 'transform_output'
+ is not supplied.
+ """
+ from tfx.components.trainer.component import Trainer
+ component_class = Trainer
+ input_channels_with_splits = {'examples'}
+ output_channels_with_splits = {}
+
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
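+    # Each output artifact is rooted under the corresponding *_path argument, with
+    # the split name (empty by default) appended as a subdirectory.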
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ Trainer,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
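+
+# For reference, a skeletal module_file satisfying the contract described in the
+# docstring above (sketch only; see the TFX Chicago Taxi taxi_utils.py example
+# for a complete implementation):
+#
+#   def trainer_fn(hparams, schema):
+#       ...
+#       return {
+#           'estimator': estimator,                # tf.estimator.Estimator
+#           'train_spec': train_spec,              # tf.estimator.TrainSpec
+#           'eval_spec': eval_spec,                # tf.estimator.EvalSpec
+#           'eval_input_receiver_fn': receiver_fn, # tfma.export.EvalInputReceiver
+#       }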
diff --git a/components/tfx/Trainer/component.yaml b/components/tfx/Trainer/component.yaml
new file mode 100644
index 00000000000..0e45ee12ceb
--- /dev/null
+++ b/components/tfx/Trainer/component.yaml
@@ -0,0 +1,347 @@
+name: Trainer
+description: |
+ A TFX component to train a TensorFlow model.
+
+ The Trainer component is used to train and eval a model using given inputs and
+ a user-supplied estimator. This component includes a custom driver to
+  optionally grab a previous model to warm-start from.
+
+ ## Providing an estimator
+ The TFX executor will use the estimator provided in the `module_file` file
+ to train the model. The Trainer executor will look specifically for the
+ `trainer_fn()` function within that file. Before training, the executor will
+ call that function expecting the following returned as a dictionary:
+
+ - estimator: The
+ [estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
+ to be used by TensorFlow to train the model.
+ - train_spec: The
+ [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)
+ to be used by the "train" part of the TensorFlow `train_and_evaluate()`
+ call.
+ - eval_spec: The
+ [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)
+ to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.
+ - eval_input_receiver_fn: The
+ [configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)
+ to be used
+ by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)
+ component when validating the model.
+
+ An example of `trainer_fn()` can be found in the [user-supplied
+  code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
+ of the TFX Chicago Taxi pipeline example.
+
+
+ Args:
+ examples: A Channel of 'ExamplesPath' type, serving as the source of
+ examples that are used in training (required). May be raw or
+ transformed.
+ transform_output: An optional Channel of 'TransformPath' type, serving as
+ the input transform graph if present.
+ #transform_graph: Forwards compatibility alias for the 'transform_output'
+ # argument.
+ schema: A Channel of 'SchemaPath' type, serving as the schema of training
+ and eval data.
+    module_file: A path to a python module file containing the UDF model definition.
+ The module_file must implement a function named `trainer_fn` at its
+ top level. The function must have the following signature.
+
+ def trainer_fn(tf.contrib.training.HParams,
+ tensorflow_metadata.proto.v0.schema_pb2) -> Dict:
+ ...
+
+ where the returned Dict has the following key-values.
+ 'estimator': an instance of tf.estimator.Estimator
+ 'train_spec': an instance of tf.estimator.TrainSpec
+ 'eval_spec': an instance of tf.estimator.EvalSpec
+ 'eval_input_receiver_fn': an instance of tfma.export.EvalInputReceiver
+
+ Exactly one of 'module_file' or 'trainer_fn' must be supplied.
+ trainer_fn: A python path to UDF model definition function. See
+ 'module_file' for the required signature of the UDF.
+ Exactly one of 'module_file' or 'trainer_fn' must be supplied.
+ train_args: A trainer_pb2.TrainArgs instance, containing args used for
+      training. Currently only num_steps is available.
+ eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
+      Currently only num_steps is available.
+ #custom_config: A dict which contains the training job parameters to be
+ # passed to Google Cloud ML Engine. For the full set of parameters
+ # supported by Google Cloud ML Engine, refer to
+ # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
+ #custom_executor_spec: Optional custom executor spec.
+ Returns:
+ output: Optional 'ModelExportPath' channel for result of exported models.
+ Raises:
+ ValueError:
+ - When both or neither of 'module_file' and 'trainer_fn' is supplied.
+ - When both or neither of 'examples' and 'transformed_examples'
+ is supplied.
+ - When 'transformed_examples' is supplied but 'transform_output'
+ is not supplied.
+inputs:
+- name: examples
+ type: Examples
+- name: transform_output
+ type: TransformGraph
+- name: schema
+ type: Schema
+- name: module_file
+ type: String
+ optional: true
+- name: trainer_fn
+ type: String
+ optional: true
+- name: train_args
+ type: 'JsonObject: tfx.proto.trainer_pb2.TrainArgs'
+ optional: true
+- name: eval_args
+ type: 'JsonObject: tfx.proto.trainer_pb2.EvalArgs'
+ optional: true
+outputs:
+- name: output
+ type: Model
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ class InputPath:
+ '''When creating component from function, InputPath should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ def Trainer(
+ examples_path: InputPath('Examples'),
+ transform_output_path: InputPath('TransformGraph'), # ? = None
+ #transform_graph_path: InputPath('TransformGraph'),
+ schema_path: InputPath('Schema'),
+
+ output_path: OutputPath('Model'),
+
+ module_file: str = None,
+ trainer_fn: str = None,
+ train_args: 'JsonObject: tfx.proto.trainer_pb2.TrainArgs' = None,
+ eval_args: 'JsonObject: tfx.proto.trainer_pb2.EvalArgs' = None,
+ #custom_config: dict = None,
+ #custom_executor_spec: Optional[executor_spec.ExecutorSpec] = None,
+ ):
+ """
+ A TFX component to train a TensorFlow model.
+
+ The Trainer component is used to train and eval a model using given inputs and
+ a user-supplied estimator. This component includes a custom driver to
+ optionally grab previous model to warm start from.
+
+ ## Providing an estimator
+ The TFX executor will use the estimator provided in the `module_file` file
+ to train the model. The Trainer executor will look specifically for the
+ `trainer_fn()` function within that file. Before training, the executor will
+ call that function expecting the following returned as a dictionary:
+
+ - estimator: The
+ [estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
+ to be used by TensorFlow to train the model.
+ - train_spec: The
+ [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)
+ to be used by the "train" part of the TensorFlow `train_and_evaluate()`
+ call.
+ - eval_spec: The
+ [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)
+ to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.
+ - eval_input_receiver_fn: The
+ [configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)
+ to be used
+ by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)
+ component when validating the model.
+
+ An example of `trainer_fn()` can be found in the [user-supplied
+      code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
+ of the TFX Chicago Taxi pipeline example.
+
+ Args:
+ examples: A Channel of 'ExamplesPath' type, serving as the source of
+ examples that are used in training (required). May be raw or
+ transformed.
+ transform_output: An optional Channel of 'TransformPath' type, serving as
+ the input transform graph if present.
+ #transform_graph: Forwards compatibility alias for the 'transform_output'
+ # argument.
+ schema: A Channel of 'SchemaPath' type, serving as the schema of training
+ and eval data.
+ module_file: A path to python module file containing UDF model definition.
+ The module_file must implement a function named `trainer_fn` at its
+ top level. The function must have the following signature.
+
+ def trainer_fn(tf.contrib.training.HParams,
+ tensorflow_metadata.proto.v0.schema_pb2) -> Dict:
+ ...
+
+ where the returned Dict has the following key-values.
+ 'estimator': an instance of tf.estimator.Estimator
+ 'train_spec': an instance of tf.estimator.TrainSpec
+ 'eval_spec': an instance of tf.estimator.EvalSpec
+ 'eval_input_receiver_fn': an instance of tfma.export.EvalInputReceiver
+
+ Exactly one of 'module_file' or 'trainer_fn' must be supplied.
+ trainer_fn: A python path to UDF model definition function. See
+ 'module_file' for the required signature of the UDF.
+ Exactly one of 'module_file' or 'trainer_fn' must be supplied.
+      train_args: A trainer_pb2.TrainArgs instance, containing args used for
+        training. Currently only num_steps is available.
+      eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
+        Currently only num_steps is available.
+ #custom_config: A dict which contains the training job parameters to be
+ # passed to Google Cloud ML Engine. For the full set of parameters
+ # supported by Google Cloud ML Engine, refer to
+ # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
+ #custom_executor_spec: Optional custom executor spec.
+ Returns:
+ output: Optional 'ModelExportPath' channel for result of exported models.
+ Raises:
+ ValueError:
+ - When both or neither of 'module_file' and 'trainer_fn' is supplied.
+ - When both or neither of 'examples' and 'transformed_examples'
+ is supplied.
+ - When 'transformed_examples' is supplied but 'transform_output'
+ is not supplied.
+ """
+ from tfx.components.trainer.component import Trainer
+ component_class = Trainer
+ input_channels_with_splits = {'examples'}
+ output_channels_with_splits = {}
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ import argparse
+      _parser = argparse.ArgumentParser(prog='Trainer', description='A TFX component to train a TensorFlow model.\n\n    The Trainer component is used to train and eval a model using given inputs and\n    a user-supplied estimator. This component includes a custom driver to\n    optionally grab previous model to warm start from.\n\n    ## Providing an estimator\n    The TFX executor will use the estimator provided in the `module_file` file\n    to train the model. The Trainer executor will look specifically for the\n    `trainer_fn()` function within that file. Before training, the executor will\n    call that function expecting the following returned as a dictionary:\n\n    - estimator: The\n      [estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)\n      to be used by TensorFlow to train the model.\n    - train_spec: The\n      [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)\n      to be used by the "train" part of the TensorFlow `train_and_evaluate()`\n      call.\n    - eval_spec: The\n      [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)\n      to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.\n    - eval_input_receiver_fn: The\n      [configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)\n      to be used\n      by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)\n      component when validating the model.\n\n    An example of `trainer_fn()` can be found in the [user-supplied\n    code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))\n    of the TFX Chicago Taxi pipeline example.\n\n\n    Args:\n      examples: A Channel of \'ExamplesPath\' type, serving as the source of\n        examples that are used in training (required). May be raw or\n        transformed.\n      transform_output: An optional Channel of \'TransformPath\' type, serving as\n        the input transform graph if present.\n      #transform_graph: Forwards compatibility alias for the \'transform_output\'\n      #  argument.\n      schema: A Channel of \'SchemaPath\' type, serving as the schema of training\n        and eval data.\n      module_file: A path to python module file containing UDF model definition.\n        The module_file must implement a function named `trainer_fn` at its\n        top level. The function must have the following signature.\n\n        def trainer_fn(tf.contrib.training.HParams,\n                       tensorflow_metadata.proto.v0.schema_pb2) -> Dict:\n          ...\n\n        where the returned Dict has the following key-values.\n          \'estimator\': an instance of tf.estimator.Estimator\n          \'train_spec\': an instance of tf.estimator.TrainSpec\n          \'eval_spec\': an instance of tf.estimator.EvalSpec\n          \'eval_input_receiver_fn\': an instance of tfma.export.EvalInputReceiver\n\n        Exactly one of \'module_file\' or \'trainer_fn\' must be supplied.\n      trainer_fn: A python path to UDF model definition function. See\n        \'module_file\' for the required signature of the UDF.\n        Exactly one of \'module_file\' or \'trainer_fn\' must be supplied.\n      train_args: A trainer_pb2.TrainArgs instance, containing args used for\n        training. Current only num_steps is available.\n      eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.\n        Current only num_steps is available.\n      #custom_config: A dict which contains the training job parameters to be\n      #   passed to Google Cloud ML Engine. For the full set of parameters\n      #   supported by Google Cloud ML Engine, refer to\n      #   https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job\n      #custom_executor_spec: Optional custom executor spec.\n    Returns:\n      output: Optional \'ModelExportPath\' channel for result of exported models.\n    Raises:\n      ValueError:\n        - When both or neither of \'module_file\' and \'trainer_fn\' is supplied.\n        - When both or neither of \'examples\' and \'transformed_examples\'\n            is supplied.\n        - When \'transformed_examples\' is supplied but \'transform_output\'\n            is not supplied.\n')
+ _parser.add_argument("--examples", dest="examples_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--transform-output", dest="transform_output_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--schema", dest="schema_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--module-file", dest="module_file", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--trainer-fn", dest="trainer_fn", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--train-args", dest="train_args", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--eval-args", dest="eval_args", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--output", dest="output_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = Trainer(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --examples
+ - inputPath: examples
+ - --transform-output
+ - inputPath: transform_output
+ - --schema
+ - inputPath: schema
+ - if:
+ cond:
+ isPresent: module_file
+ then:
+ - --module-file
+ - inputValue: module_file
+ - if:
+ cond:
+ isPresent: trainer_fn
+ then:
+ - --trainer-fn
+ - inputValue: trainer_fn
+ - if:
+ cond:
+ isPresent: train_args
+ then:
+ - --train-args
+ - inputValue: train_args
+ - if:
+ cond:
+ isPresent: eval_args
+ then:
+ - --eval-args
+ - inputValue: eval_args
+ - --output
+ - outputPath: output
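For reference, the `trainer_fn()` contract documented above can be satisfied by a module along these lines. This is a minimal sketch and not part of the change: the feature column, the `hparams` attribute names (`train_files`, `train_steps`, `eval_files`, `eval_steps`) and the placeholder helpers are illustrative assumptions; a real module derives the input and receiver functions from the schema and transform output, as in the Chicago Taxi `taxi_utils.py` example linked in the docstring.

```python
# trainer_module.py -- illustrative sketch only; names flagged above are assumptions.
import tensorflow as tf


def trainer_fn(hparams, schema):
    """Returns the dictionary of objects the Trainer executor expects."""

    def _input_fn(file_pattern):
        # Placeholder: parse tf.Examples matching `file_pattern` using `schema`.
        raise NotImplementedError

    def _eval_input_receiver_fn():
        # Placeholder: build a tfma.export.EvalInputReceiver for model analysis.
        raise NotImplementedError

    # A deliberately tiny estimator; real pipelines build feature columns from the schema.
    estimator = tf.estimator.LinearClassifier(
        feature_columns=[tf.feature_column.numeric_column('trip_miles')])

    return {
        'estimator': estimator,
        'train_spec': tf.estimator.TrainSpec(
            input_fn=lambda: _input_fn(hparams.train_files), max_steps=hparams.train_steps),
        'eval_spec': tf.estimator.EvalSpec(
            input_fn=lambda: _input_fn(hparams.eval_files), steps=hparams.eval_steps),
        'eval_input_receiver_fn': _eval_input_receiver_fn,
    }
```

The component's `train_args`/`eval_args` inputs are JSON-serialized `trainer_pb2` protos, e.g. `'{"num_steps": 10000}'`, as the sample notebook later in this change shows.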
diff --git a/components/tfx/Transform/component.py b/components/tfx/Transform/component.py
new file mode 100644
index 00000000000..ed4d6300c4d
--- /dev/null
+++ b/components/tfx/Transform/component.py
@@ -0,0 +1,141 @@
+# flake8: noqa TODO
+
+from kfp.components import InputPath, OutputPath
+
+
+def Transform(
+ input_data_path: InputPath('Examples'),
+ #examples: InputPath('Examples'),
+ schema_path: InputPath('Schema'),
+
+ transform_output_path: OutputPath('TransformGraph'),
+ #transform_graph_path: OutputPath('TransformGraph'),
+ transformed_examples_path: OutputPath('Examples'),
+
+ module_file: 'Uri' = None,
+ preprocessing_fn: str = None,
+):
+ """A TFX component to transform the input examples.
+
+ The Transform component wraps TensorFlow Transform (tf.Transform) to
+ preprocess data in a TFX pipeline. This component will load the
+ preprocessing_fn from input module file, preprocess both 'train' and 'eval'
+ splits of input examples, generate the `tf.Transform` output, and save both
+ transform function and transformed examples to orchestrator desired locations.
+
+ ## Providing a preprocessing function
+  The TFX executor will use the preprocessing function provided in the
+  `module_file` file to transform the data. The Transform executor will look
+  specifically for the `preprocessing_fn()` function within that file.
+
+ An example of `preprocessing_fn()` can be found in the [user-supplied
+  code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
+ of the TFX Chicago Taxi pipeline example.
+
+ Args:
+ input_data: A Channel of 'Examples' type (required). This should
+ contain the two splits 'train' and 'eval'.
+ #examples: Forwards compatibility alias for the 'input_data' argument.
+ schema: A Channel of 'SchemaPath' type. This should contain a single
+ schema artifact.
+ module_file: The file path to a python module file, from which the
+ 'preprocessing_fn' function will be loaded. The function must have the
+ following signature.
+
+ def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
+ ...
+
+ where the values of input and returned Dict are either tf.Tensor or
+ tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
+ must be supplied.
+ preprocessing_fn: The path to python function that implements a
+ 'preprocessing_fn'. See 'module_file' for expected signature of the
+ function. Exactly one of 'module_file' or 'preprocessing_fn' must
+ be supplied.
+
+ Returns:
+ transform_output: Optional output 'TransformPath' channel for output of
+ 'tf.Transform', which includes an exported Tensorflow graph suitable for
+ both training and serving;
+ transformed_examples: Optional output 'ExamplesPath' channel for
+ materialized transformed examples, which includes both 'train' and
+ 'eval' splits.
+
+ Raises:
+ ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
+ is supplied.
+ """
+ from tfx.components.transform.component import Transform
+ component_class = Transform
+ input_channels_with_splits = {'input_data', 'examples'}
+ output_channels_with_splits = {'transformed_examples'}
+
+
+ import json
+ import os
+ import tfx
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+
+
+if __name__ == '__main__':
+ import kfp
+ kfp.components.func_to_container_op(
+ Transform,
+ base_image='tensorflow/tfx:0.15.0',
+ output_component_file='component.yaml'
+ )
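For reference, a module implementing the `preprocessing_fn()` contract loaded from `module_file` could look like the sketch below. The feature names are illustrative assumptions; only standard `tensorflow_transform` functions are used, and the Chicago Taxi `taxi_utils.py` linked above is the full example.

```python
# transform_module.py -- illustrative sketch only.
import tensorflow_transform as tft


def preprocessing_fn(inputs):
    """Maps raw feature tensors to transformed feature tensors."""
    outputs = {}
    # Scale a hypothetical numeric feature to z-scores.
    outputs['trip_miles_xf'] = tft.scale_to_z_score(inputs['trip_miles'])
    # Replace a hypothetical categorical feature with vocabulary indices.
    outputs['payment_type_xf'] = tft.compute_and_apply_vocabulary(inputs['payment_type'])
    # Pass the label through unchanged.
    outputs['tips'] = inputs['tips']
    return outputs
```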
diff --git a/components/tfx/Transform/component.yaml b/components/tfx/Transform/component.yaml
new file mode 100644
index 00000000000..b3302d14387
--- /dev/null
+++ b/components/tfx/Transform/component.yaml
@@ -0,0 +1,264 @@
+name: Transform
+description: |
+ A TFX component to transform the input examples.
+
+ The Transform component wraps TensorFlow Transform (tf.Transform) to
+ preprocess data in a TFX pipeline. This component will load the
+ preprocessing_fn from input module file, preprocess both 'train' and 'eval'
+ splits of input examples, generate the `tf.Transform` output, and save both
+ transform function and transformed examples to orchestrator desired locations.
+
+ ## Providing a preprocessing function
+  The TFX executor will use the preprocessing function provided in the
+  `module_file` file to transform the data. The Transform executor will look
+  specifically for the `preprocessing_fn()` function within that file.
+
+ An example of `preprocessing_fn()` can be found in the [user-supplied
+  code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
+ of the TFX Chicago Taxi pipeline example.
+
+ Args:
+ input_data: A Channel of 'Examples' type (required). This should
+ contain the two splits 'train' and 'eval'.
+ #examples: Forwards compatibility alias for the 'input_data' argument.
+ schema: A Channel of 'SchemaPath' type. This should contain a single
+ schema artifact.
+ module_file: The file path to a python module file, from which the
+ 'preprocessing_fn' function will be loaded. The function must have the
+ following signature.
+
+ def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
+ ...
+
+ where the values of input and returned Dict are either tf.Tensor or
+ tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
+ must be supplied.
+ preprocessing_fn: The path to python function that implements a
+ 'preprocessing_fn'. See 'module_file' for expected signature of the
+ function. Exactly one of 'module_file' or 'preprocessing_fn' must
+ be supplied.
+
+ Returns:
+ transform_output: Optional output 'TransformPath' channel for output of
+ 'tf.Transform', which includes an exported Tensorflow graph suitable for
+ both training and serving;
+ transformed_examples: Optional output 'ExamplesPath' channel for
+ materialized transformed examples, which includes both 'train' and
+ 'eval' splits.
+
+ Raises:
+ ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
+ is supplied.
+inputs:
+- name: input_data
+ type: Examples
+- name: schema
+ type: Schema
+- name: module_file
+ type: Uri
+ optional: true
+- name: preprocessing_fn
+ type: String
+ optional: true
+outputs:
+- name: transform_output
+ type: TransformGraph
+- name: transformed_examples
+ type: Examples
+implementation:
+ container:
+ image: tensorflow/tfx:0.15.0
+ command:
+ - python3
+ - -u
+ - -c
+ - |
+ class OutputPath:
+ '''When creating component from function, OutputPath should be used as function parameter annotation to tell the system that the function wants to output data by writing it into a file with the given path instead of returning the data from the function.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ class InputPath:
+ '''When creating component from function, InputPath should be used as function parameter annotation to tell the system to pass the *data file path* to the function instead of passing the actual data.'''
+ def __init__(self, type=None):
+ self.type = type
+
+ def _make_parent_dirs_and_return_path(file_path: str):
+ import os
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
+ return file_path
+
+ def Transform(
+ input_data_path: InputPath('Examples'),
+ #examples: InputPath('Examples'),
+ schema_path: InputPath('Schema'),
+
+ transform_output_path: OutputPath('TransformGraph'),
+ #transform_graph_path: OutputPath('TransformGraph'),
+ transformed_examples_path: OutputPath('Examples'),
+
+ module_file: 'Uri' = None,
+ preprocessing_fn: str = None,
+ ):
+ """A TFX component to transform the input examples.
+
+ The Transform component wraps TensorFlow Transform (tf.Transform) to
+ preprocess data in a TFX pipeline. This component will load the
+ preprocessing_fn from input module file, preprocess both 'train' and 'eval'
+ splits of input examples, generate the `tf.Transform` output, and save both
+ transform function and transformed examples to orchestrator desired locations.
+
+ ## Providing a preprocessing function
+      The TFX executor will use the preprocessing function provided in the
+      `module_file` file to transform the data. The Transform executor will look
+      specifically for the `preprocessing_fn()` function within that file.
+
+ An example of `preprocessing_fn()` can be found in the [user-supplied
+      code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
+ of the TFX Chicago Taxi pipeline example.
+
+ Args:
+ input_data: A Channel of 'Examples' type (required). This should
+ contain the two splits 'train' and 'eval'.
+ #examples: Forwards compatibility alias for the 'input_data' argument.
+ schema: A Channel of 'SchemaPath' type. This should contain a single
+ schema artifact.
+ module_file: The file path to a python module file, from which the
+ 'preprocessing_fn' function will be loaded. The function must have the
+ following signature.
+
+ def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
+ ...
+
+ where the values of input and returned Dict are either tf.Tensor or
+ tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
+ must be supplied.
+ preprocessing_fn: The path to python function that implements a
+ 'preprocessing_fn'. See 'module_file' for expected signature of the
+ function. Exactly one of 'module_file' or 'preprocessing_fn' must
+ be supplied.
+
+ Returns:
+ transform_output: Optional output 'TransformPath' channel for output of
+ 'tf.Transform', which includes an exported Tensorflow graph suitable for
+ both training and serving;
+ transformed_examples: Optional output 'ExamplesPath' channel for
+ materialized transformed examples, which includes both 'train' and
+ 'eval' splits.
+
+ Raises:
+ ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
+ is supplied.
+ """
+ from tfx.components.transform.component import Transform
+ component_class = Transform
+ input_channels_with_splits = {'input_data', 'examples'}
+ output_channels_with_splits = {'transformed_examples'}
+
+ import json
+ import os
+ from google.protobuf import json_format, message
+ from tfx.types import Artifact, channel_utils
+
+ arguments = locals().copy()
+
+ component_class_args = {}
+
+ for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
+ argument_value_obj = argument_value = arguments.get(name, None)
+ if argument_value is None:
+ continue
+ parameter_type = execution_parameter.type
+ if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # execution_parameter.type can also be a tuple
+ argument_value_obj = parameter_type()
+ json_format.Parse(argument_value, argument_value_obj)
+ component_class_args[name] = argument_value_obj
+
+ for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
+ artifact_path = arguments[name + '_path']
+ artifacts = []
+ if name in input_channels_with_splits:
+ # Recovering splits
+ splits = sorted(os.listdir(artifact_path))
+ for split in splits:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.split = split
+ artifact.uri = os.path.join(artifact_path, split) + '/'
+ artifacts.append(artifact)
+ else:
+ artifact = Artifact(type_name=channel_parameter.type_name)
+ artifact.uri = artifact_path + '/' # ?
+ artifacts.append(artifact)
+ component_class_args[name] = channel_utils.as_channel(artifacts)
+
+ component_class_instance = component_class(**component_class_args)
+
+ input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
+ output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
+ exec_properties = component_class_instance.exec_properties
+
+ # Generating paths for output artifacts
+ for name, artifacts in output_dict.items():
+ base_artifact_path = arguments[name + '_path']
+ for artifact in artifacts:
+ artifact.uri = os.path.join(base_artifact_path, artifact.split) # Default split is ''
+
+ print('component instance: ' + str(component_class_instance))
+
+ #executor = component_class.EXECUTOR_SPEC.executor_class() # Same
+ executor = component_class_instance.executor_spec.executor_class()
+ executor.Do(
+ input_dict=input_dict,
+ output_dict=output_dict,
+ exec_properties=exec_properties,
+ )
+
+ import argparse
+ _parser = argparse.ArgumentParser(prog='Transform', description="A TFX component to transform the input examples.\n\n The Transform component wraps TensorFlow Transform (tf.Transform) to\n preprocess data in a TFX pipeline. This component will load the\n preprocessing_fn from input module file, preprocess both 'train' and 'eval'\n splits of input examples, generate the `tf.Transform` output, and save both\n transform function and transformed examples to orchestrator desired locations.\n\n ## Providing a preprocessing function\n The TFX executor will use the estimator provided in the `module_file` file\n to train the model. The Transform executor will look specifically for the\n `preprocessing_fn()` function within that file.\n\n An example of `preprocessing_fn()` can be found in the [user-supplied\n code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))\n of the TFX Chicago Taxi pipeline example.\n\n Args:\n input_data: A Channel of 'Examples' type (required). This should\n contain the two splits 'train' and 'eval'.\n #examples: Forwards compatibility alias for the 'input_data' argument.\n schema: A Channel of 'SchemaPath' type. This should contain a single\n schema artifact.\n module_file: The file path to a python module file, from which the\n 'preprocessing_fn' function will be loaded. The function must have the\n following signature.\n\n def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:\n ...\n\n where the values of input and returned Dict are either tf.Tensor or\n tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'\n must be supplied.\n preprocessing_fn: The path to python function that implements a\n 'preprocessing_fn'. See 'module_file' for expected signature of the\n function. Exactly one of 'module_file' or 'preprocessing_fn' must\n be supplied.\n\n Returns:\n transform_output: Optional output 'TransformPath' channel for output of\n 'tf.Transform', which includes an exported Tensorflow graph suitable for\n both training and serving;\n transformed_examples: Optional output 'ExamplesPath' channel for\n materialized transformed examples, which includes both 'train' and\n 'eval' splits.\n\n Raises:\n ValueError: When both or neither of 'module_file' and 'preprocessing_fn'\n is supplied.\n")
+ _parser.add_argument("--input-data", dest="input_data_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--schema", dest="schema_path", type=str, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--module-file", dest="module_file", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--preprocessing-fn", dest="preprocessing_fn", type=str, required=False, default=argparse.SUPPRESS)
+ _parser.add_argument("--transform-output", dest="transform_output_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parser.add_argument("--transformed-examples", dest="transformed_examples_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
+ _parsed_args = vars(_parser.parse_args())
+ _output_files = _parsed_args.pop("_output_paths", [])
+
+ _outputs = Transform(**_parsed_args)
+
+ if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
+ _outputs = [_outputs]
+
+ _output_serializers = [
+
+ ]
+
+ import os
+ for idx, output_file in enumerate(_output_files):
+ try:
+ os.makedirs(os.path.dirname(output_file))
+ except OSError:
+ pass
+ with open(output_file, 'w') as f:
+ f.write(_output_serializers[idx](_outputs[idx]))
+ args:
+ - --input-data
+ - inputPath: input_data
+ - --schema
+ - inputPath: schema
+ - if:
+ cond:
+ isPresent: module_file
+ then:
+ - --module-file
+ - inputValue: module_file
+ - if:
+ cond:
+ isPresent: preprocessing_fn
+ then:
+ - --preprocessing-fn
+ - inputValue: preprocessing_fn
+ - --transform-output
+ - outputPath: transform_output
+ - --transformed-examples
+ - outputPath: transformed_examples
diff --git a/components/tfx/_samples/TFX_pipeline.ipynb b/components/tfx/_samples/TFX_pipeline.ipynb
new file mode 100644
index 00000000000..aed8b875c3f
--- /dev/null
+++ b/components/tfx/_samples/TFX_pipeline.ipynb
@@ -0,0 +1,166 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### TFX Components\n",
+ "\n",
+ "This notebook shows how to create pipeline that uses TFX components:\n",
+ "\n",
+ "* CsvExampleGen\n",
+ "* StatisticsGen\n",
+ "* SchemaGen\n",
+ "* ExampleValidator\n",
+ "* Transform\n",
+ "* Trainer\n",
+ "* Evaluator"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Put your KFP cluster endpoint URL here if working from GCP notebooks (or local notebooks). ('https://xxxxx.notebooks.googleusercontent.com/')\n",
+ "kfp_endpoint='https://XXXXX.notebooks.googleusercontent.com/'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "input_data_uri = 'gs://ml-pipeline-playground/tensorflow-tfx-repo/tfx/components/testdata/external/csv'\n",
+ "\n",
+ "#Only S3/GCS is supported for now.\n",
+ "module_file = 'gs://ml-pipeline-playground/tensorflow-tfx-repo/tfx/examples/chicago_taxi_pipeline/taxi_utils.py'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import kfp"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "from kfp.components import load_component_from_url\n",
+ "\n",
+ "download_from_gcs_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/d013b8535666641ca5a5be6ce67e69e044bbf076/components/google-cloud/storage/download/component.yaml')\n",
+ "\n",
+ "CsvExampleGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/025c424a/components/tfx/ExampleGen/CsvExampleGen/component.yaml')\n",
+ "StatisticsGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/025c424a/components/tfx/StatisticsGen/component.yaml')\n",
+ "SchemaGen_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/025c424a/components/tfx/SchemaGen/component.yaml')\n",
+ "ExampleValidator_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/025c424a/components/tfx/ExampleValidator/component.yaml')\n",
+ "Transform_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/025c424a/components/tfx/Transform/component.yaml')\n",
+ "Trainer_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/025c424a/components/tfx/Trainer/component.yaml')\n",
+ "Evaluator_op = load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/025c424a/components/tfx/Evaluator/component.yaml')\n",
+ "\n",
+ "def tfx_pipeline(\n",
+ " input_data_uri,\n",
+ "):\n",
+ " download_task = download_from_gcs_op(\n",
+ " input_data_uri,\n",
+ " )\n",
+ "\n",
+ " examples_task = CsvExampleGen_op(\n",
+ " input_base=download_task.output,\n",
+ " input_config=json.dumps({\n",
+ " \"splits\": [\n",
+ " {'name': 'data', 'pattern': '*.csv'},\n",
+ " ]\n",
+ " }),\n",
+ " output_config=json.dumps({\n",
+ " \"splitConfig\": {\n",
+ " \"splits\": [\n",
+ " {'name': 'train', 'hash_buckets': 2},\n",
+ " {'name': 'eval', 'hash_buckets': 1},\n",
+ " ]\n",
+ " }\n",
+ " }),\n",
+ " )\n",
+ " \n",
+ " statistics_task = StatisticsGen_op(\n",
+ " examples_task.output,\n",
+ " )\n",
+ " \n",
+ " schema_task = SchemaGen_op(\n",
+ " statistics_task.output,\n",
+ " )\n",
+ "\n",
+ " # Performs anomaly detection based on statistics and data schema.\n",
+ " validator_task = ExampleValidator_op(\n",
+ " stats=statistics_task.outputs['output'],\n",
+ " schema=schema_task.outputs['output'],\n",
+ " )\n",
+ "\n",
+ " # Performs transformations and feature engineering in training and serving.\n",
+ " transform_task = Transform_op(\n",
+ " input_data=examples_task.outputs['example_artifacts'],\n",
+ " schema=schema_task.outputs['output'],\n",
+ " module_file=module_file,\n",
+ " )\n",
+ "\n",
+ " trainer_task = Trainer_op(\n",
+ " module_file=module_file,\n",
+ " examples=transform_task.outputs['transformed_examples'],\n",
+ " schema=schema_task.outputs['output'],\n",
+ " transform_output=transform_task.outputs['transform_output'],\n",
+ " train_args=json.dumps({'num_steps': 10000}),\n",
+ " eval_args=json.dumps({'num_steps': 5000}),\n",
+ " )\n",
+ "\n",
+ " # Uses TFMA to compute a evaluation statistics over features of a model.\n",
+ " model_analyzer = Evaluator_op(\n",
+ " examples=examples_task.outputs['example_artifacts'],\n",
+ " model_exports=trainer_task.outputs['output'],\n",
+ " feature_slicing_spec=json.dumps({\n",
+ " 'specs': [\n",
+ " {'column_for_slicing': ['trip_start_hour']},\n",
+ " ],\n",
+ " }),\n",
+ " )\n",
+ "\n",
+ "\n",
+ "kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(\n",
+ " tfx_pipeline,\n",
+ " arguments=dict(\n",
+ " input_data_uri=input_data_uri,\n",
+ " ),\n",
+ ")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.5.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
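Besides launching the run directly with `create_run_from_pipeline_func`, the same pipeline function can be compiled into a package first; a small sketch using the standard KFP SDK (the package file name is arbitrary):

```python
# Compile the notebook's tfx_pipeline function into a package that can be
# uploaded through the Kubeflow Pipelines UI or API.
import kfp

kfp.compiler.Compiler().compile(tfx_pipeline, 'tfx_pipeline.zip')

# Optionally upload it with the same client used in the notebook:
# kfp.Client(host=kfp_endpoint).upload_pipeline('tfx_pipeline.zip', pipeline_name='tfx-demo')
```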
diff --git a/components/third_party_licenses.csv b/components/third_party_licenses.csv
index 693ffdcb783..b956e6f1d4a 100644
--- a/components/third_party_licenses.csv
+++ b/components/third_party_licenses.csv
@@ -194,3 +194,10 @@ grpc-google-logging-v2,https://raw.githubusercontent.com/googleapis/googleapis/m
grpc-google-pubsub-v1,https://raw.githubusercontent.com/googleapis/googleapis/master/LICENSE,Apache 2.0
google-cloud-datastore,https://raw.githubusercontent.com/GoogleCloudPlatform/google-cloud-datastore/master/LICENSE,Apache 2.0
pymongo,https://raw.githubusercontent.com/mongodb/mongo-python-driver/master/LICENSE,Apache 2.0
+google-auth-oauthlib,https://raw.githubusercontent.com/googleapis/google-auth-library-python-oauthlib/master/LICENSE,Apache 2.0
+google-pasta,https://raw.githubusercontent.com/google/pasta/master/LICENSE,Apache 2.0
+Keras-Preprocessing,https://raw.githubusercontent.com/keras-team/keras-preprocessing/master/LICENSE,MIT
+ml-metadata,https://raw.githubusercontent.com/google/ml-metadata/master/LICENSE,Apache 2.0
+opt-einsum,https://raw.githubusercontent.com/dgasmith/opt_einsum/master/LICENSE,MIT
+tensorflow-estimator,https://raw.githubusercontent.com/tensorflow/estimator/master/LICENSE,Apache 2.0
+wrapt,https://github.com/GrahamDumpleton/wrapt/blob/develop/LICENSE,2-Clause BSD
diff --git a/developer_guide.md b/developer_guide.md
index afcc1dad7f0..a9e6a37f4bb 100644
--- a/developer_guide.md
+++ b/developer_guide.md
@@ -80,7 +80,7 @@ $ docker build -t ml-pipeline-api-server -f backend/Dockerfile .
Python based visualizations are a new method to visualize results within the
Kubeflow Pipelines UI. For more information about Python based visualizations
please visit the [documentation page](https://www.kubeflow.org/docs/pipelines/sdk/python-based-visualizations).
-To create predefine visualizations please check the [developer guide](https://github.com/kubeflow/pipelines/blob/master/backend/src/apiserver/visualization/developer_guide.md).
+To create predefined visualizations, please check the [developer guide](https://github.com/kubeflow/pipelines/blob/master/backend/src/apiserver/visualization/README.md).
## Unit test
diff --git a/frontend/.gitignore b/frontend/.gitignore
index 73f4e092e1e..f76f50f9cac 100644
--- a/frontend/.gitignore
+++ b/frontend/.gitignore
@@ -24,3 +24,9 @@ backstop_data/bitmaps_test/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
+
+# coverage reports
+coverage
+
+# vscode
+.vscode
diff --git a/frontend/.nvmrc b/frontend/.nvmrc
new file mode 100644
index 00000000000..cae54a258e6
--- /dev/null
+++ b/frontend/.nvmrc
@@ -0,0 +1 @@
+v12.14.1
diff --git a/frontend/.prettierignore b/frontend/.prettierignore
index 650570754b3..c83f90a2b01 100644
--- a/frontend/.prettierignore
+++ b/frontend/.prettierignore
@@ -1,2 +1 @@
src/generated
-server
diff --git a/frontend/Dockerfile b/frontend/Dockerfile
index d27c3accd90..7fe73ebc234 100644
--- a/frontend/Dockerfile
+++ b/frontend/Dockerfile
@@ -1,4 +1,4 @@
-FROM node:9.4.0 as build
+FROM node:12.14.1 as build
ARG COMMIT_HASH
ARG DATE
@@ -37,7 +37,7 @@ RUN npm i -D license-checker
RUN node gen_licenses . && node gen_licenses server && \
cat dependency-licenses.txt >> server/dependency-licenses.txt
-FROM node:9.4.0-alpine
+FROM node:12.14.1-alpine
COPY --from=build ./src/frontend/server /server
COPY --from=build ./src/frontend/build /client
diff --git a/frontend/README.md b/frontend/README.md
index edf9b769791..6e754d27295 100644
--- a/frontend/README.md
+++ b/frontend/README.md
@@ -76,6 +76,12 @@ To understand more what prettier is: [What is Prettier](https://prettier.io/docs
},
"editor.formatOnSave": true,
},
+ "[typescriptreact]": {
+ "editor.codeActionsOnSave": {
+ "source.organizeImports": true,
+ },
+ "editor.formatOnSave": true,
+ },
```
- For others, refer to https://prettier.io/docs/en/editors.html
diff --git a/frontend/backstop.ts b/frontend/backstop.ts
index 9d58c9b39f8..f3b96de76ae 100644
--- a/frontend/backstop.ts
+++ b/frontend/backstop.ts
@@ -25,7 +25,7 @@ const config = {
debugWindow: false,
engine: 'puppeteer',
engineOptions: {
- args: ['--no-sandbox']
+ args: ['--no-sandbox'],
},
id: 'pipelines',
onReadyScript: 'steps.js',
@@ -44,16 +44,12 @@ const config = {
},
{
label: 'hover on first row',
- steps: [
- { action: 'hover', selector: '.tableRow' },
- ],
+ steps: [{ action: 'hover', selector: '.tableRow' }],
url,
},
{
label: 'select one row',
- steps: [
- { action: 'click', selector: '.tableRow' }
- ],
+ steps: [{ action: 'click', selector: '.tableRow' }],
url,
},
{
@@ -61,18 +57,15 @@ const config = {
steps: [
{ action: 'click', selector: '.tableRow' },
{ action: 'click', selector: `.tableRow:nth-of-type(2)` },
- { action: 'click', selector: `.tableRow:nth-of-type(5)` }
+ { action: 'click', selector: `.tableRow:nth-of-type(5)` },
],
url,
},
{
label: 'open upload dialog',
- steps: [
- { action: 'click', selector: '#uploadBtn' },
- { action: 'pause' }
- ],
+ steps: [{ action: 'click', selector: '#uploadBtn' }, { action: 'pause' }],
url,
- }
+ },
],
viewports: [{ width: 1024, height: 768 }],
};
diff --git a/frontend/mock-backend/fixed-data.ts b/frontend/mock-backend/fixed-data.ts
index 415afb2512f..25704bc4557 100644
--- a/frontend/mock-backend/fixed-data.ts
+++ b/frontend/mock-backend/fixed-data.ts
@@ -113,6 +113,13 @@ const pipelines: ApiPipeline[] = [
name: 'Markdown description',
parameters: [],
},
+ {
+ created_at: new Date('2020-01-22T20:59:23.000Z'),
+ description: 'A pipeline with a long name',
+ id: '9fbe3bd6-a01f-11e8-98d0-529269fb1462',
+ name: 'A pipeline with a very very very very very very very long name',
+ parameters: [],
+ },
];
pipelines.push(...generateNPipelines());
@@ -278,6 +285,11 @@ const experiments: ApiExperiment[] = [
id: 'a4d4f8c6-ce9c-4200-a92e-c48ec759b733',
name: 'Experiment Number 2',
},
+ {
+ description: 'This experiment has a very very very long name',
+ id: 'z4d4f8c6-ce9c-4200-a92e-c48ec759b733',
+ name: 'Experiment with a very very very very very very very very very very very very long name',
+ },
];
const runs: ApiRunDetail[] = [
diff --git a/frontend/mock-backend/hello-world-runtime.ts b/frontend/mock-backend/hello-world-runtime.ts
index 02f554ee1f6..ab74a1d601a 100644
--- a/frontend/mock-backend/hello-world-runtime.ts
+++ b/frontend/mock-backend/hello-world-runtime.ts
@@ -24,8 +24,8 @@ export default {
creationTimestamp: '2018-06-06T00:04:49Z',
labels: {
'workflows.argoproj.io/completed': 'true',
- 'workflows.argoproj.io/phase': 'Succeeded'
- }
+ 'workflows.argoproj.io/phase': 'Succeeded',
+ },
},
spec: {
templates: [
@@ -37,25 +37,21 @@ export default {
container: {
name: '',
image: 'docker/whalesay:latest',
- command: [
- 'cowsay'
- ],
- args: [
- '{{workflow.parameters.message}}'
- ],
- resources: {}
- }
- }
+ command: ['cowsay'],
+ args: ['{{workflow.parameters.message}}'],
+ resources: {},
+ },
+ },
],
entrypoint: 'whalesay1',
arguments: {
parameters: [
{
name: 'message',
- value: 'hello world'
- }
- ]
- }
+ value: 'hello world',
+ },
+ ],
+ },
},
status: {
phase: 'Succeeded',
@@ -70,8 +66,8 @@ export default {
templateName: 'whalesay1',
phase: 'Succeeded',
startedAt: '2018-06-06T00:04:49Z',
- finishedAt: '2018-06-06T00:05:23Z'
- }
- }
- }
+ finishedAt: '2018-06-06T00:05:23Z',
+ },
+ },
+ },
};
diff --git a/frontend/mock-backend/hello-world-with-steps-runtime.ts b/frontend/mock-backend/hello-world-with-steps-runtime.ts
index a7c672a87a7..3c581afe112 100644
--- a/frontend/mock-backend/hello-world-with-steps-runtime.ts
+++ b/frontend/mock-backend/hello-world-with-steps-runtime.ts
@@ -18,14 +18,15 @@ export default {
name: 'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c',
namespace: 'default',
// tslint:disable-next-line:max-line-length
- selfLink: '/apis/argoproj.io/v1alpha1/namespaces/default/workflows/hello-world-61985dbf-4299-458b-a183-1f2c2436c21c',
+ selfLink:
+ '/apis/argoproj.io/v1alpha1/namespaces/default/workflows/hello-world-61985dbf-4299-458b-a183-1f2c2436c21c',
uid: 'ef2a4a61-6e84-11e8-bba7-42010a8a0fc2',
resourceVersion: '10690686',
creationTimestamp: '2018-06-12T21:09:46Z',
labels: {
'workflows.argoproj.io/completed': 'true',
- 'workflows.argoproj.io/phase': 'Succeeded'
- }
+ 'workflows.argoproj.io/phase': 'Succeeded',
+ },
},
spec: {
templates: [
@@ -39,10 +40,10 @@ export default {
{
name: 'say',
template: 'say',
- arguments: {}
- }
- ]
- ]
+ arguments: {},
+ },
+ ],
+ ],
},
{
name: 'say',
@@ -52,18 +53,14 @@ export default {
container: {
name: '',
image: 'docker/whalesay:latest',
- command: [
- 'cowsay'
- ],
- args: [
- 'hello world'
- ],
- resources: {}
- }
- }
+ command: ['cowsay'],
+ args: ['hello world'],
+ resources: {},
+ },
+ },
],
entrypoint: 'whalesay',
- arguments: {}
+ arguments: {},
},
status: {
phase: 'Succeeded',
@@ -79,9 +76,7 @@ export default {
phase: 'Succeeded',
startedAt: '2018-06-12T21:09:46Z',
finishedAt: '2018-06-12T21:09:47Z',
- children: [
- 'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c-2303694156'
- ]
+ children: ['hello-world-61985dbf-4299-458b-a183-1f2c2436c21c-2303694156'],
},
'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c-2303694156': {
id: 'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c-2303694156',
@@ -92,9 +87,7 @@ export default {
boundaryID: 'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c',
startedAt: '2018-06-12T21:09:46Z',
finishedAt: '2018-06-12T21:09:47Z',
- children: [
- 'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c-3584189705'
- ]
+ children: ['hello-world-61985dbf-4299-458b-a183-1f2c2436c21c-3584189705'],
},
'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c-3584189705': {
id: 'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c-3584189705',
@@ -105,8 +98,8 @@ export default {
phase: 'Succeeded',
boundaryID: 'hello-world-61985dbf-4299-458b-a183-1f2c2436c21c',
startedAt: '2018-06-12T21:09:46Z',
- finishedAt: '2018-06-12T21:09:47Z'
- }
- }
- }
+ finishedAt: '2018-06-12T21:09:47Z',
+ },
+ },
+ },
};
diff --git a/frontend/mock-backend/integration-test-runtime.ts b/frontend/mock-backend/integration-test-runtime.ts
index 6716f345487..b63cd071bee 100644
--- a/frontend/mock-backend/integration-test-runtime.ts
+++ b/frontend/mock-backend/integration-test-runtime.ts
@@ -17,7 +17,8 @@ export default {
metadata: {
name: 'job-cloneofhelloworldls94q-1-3667110102',
namespace: 'kubeflow',
- selfLink: '/apis/argoproj.io/v1alpha1/namespaces/kubeflow/workflows/job-cloneofhelloworldls94q-1-3667110102',
+ selfLink:
+ '/apis/argoproj.io/v1alpha1/namespaces/kubeflow/workflows/job-cloneofhelloworldls94q-1-3667110102',
uid: '55dc2b6d-d688-11e8-83db-42010a800093',
resourceVersion: '128069',
creationTimestamp: '2018-10-23T05:56:07Z',
@@ -27,7 +28,7 @@ export default {
'scheduledworkflows.kubeflow.org/workflowEpoch': '1540274157',
'scheduledworkflows.kubeflow.org/workflowIndex': '1',
'workflows.argoproj.io/completed': 'true',
- 'workflows.argoproj.io/phase': 'Succeeded'
+ 'workflows.argoproj.io/phase': 'Succeeded',
},
ownerReferences: [
{
@@ -36,9 +37,9 @@ export default {
name: 'job-cloneofhelloworldls94q',
uid: '4fac8e0f-d688-11e8-83db-42010a800093',
controller: true,
- blockOwnerDeletion: true
- }
- ]
+ blockOwnerDeletion: true,
+ },
+ ],
},
spec: {
templates: [
@@ -56,10 +57,10 @@ export default {
parameters: [
{
name: 'message',
- value: '{{workflow.parameters.message}} from node: A'
- }
- ]
- }
+ value: '{{workflow.parameters.message}} from node: A',
+ },
+ ],
+ },
},
{
name: 'B',
@@ -68,13 +69,11 @@ export default {
parameters: [
{
name: 'message',
- value: '{{workflow.parameters.message}} from node: B'
- }
- ]
+ value: '{{workflow.parameters.message}} from node: B',
+ },
+ ],
},
- dependencies: [
- 'A'
- ]
+ dependencies: ['A'],
},
{
name: 'C',
@@ -83,13 +82,11 @@ export default {
parameters: [
{
name: 'message',
- value: '{{workflow.parameters.message}} from node: C'
- }
- ]
+ value: '{{workflow.parameters.message}} from node: C',
+ },
+ ],
},
- dependencies: [
- 'A'
- ]
+ dependencies: ['A'],
},
{
name: 'D',
@@ -98,49 +95,43 @@ export default {
parameters: [
{
name: 'message',
- value: '{{workflow.parameters.message}} from node: D'
- }
- ]
+ value: '{{workflow.parameters.message}} from node: D',
+ },
+ ],
},
- dependencies: [
- 'B',
- 'C'
- ]
- }
- ]
- }
+ dependencies: ['B', 'C'],
+ },
+ ],
+ },
},
{
name: 'echo',
inputs: {
parameters: [
{
- name: 'message'
- }
- ]
+ name: 'message',
+ },
+ ],
},
outputs: {},
metadata: {},
container: {
name: '',
image: 'alpine:3.7',
- command: [
- 'echo',
- '{{inputs.parameters.message}}'
- ],
- resources: {}
- }
- }
+ command: ['echo', '{{inputs.parameters.message}}'],
+ resources: {},
+ },
+ },
],
entrypoint: 'diamond',
arguments: {
parameters: [
{
name: 'message',
- value: 'hello world'
- }
- ]
- }
+ value: 'hello world',
+ },
+ ],
+ },
},
status: {
phase: 'Succeeded',
@@ -156,12 +147,8 @@ export default {
phase: 'Succeeded',
startedAt: '2018-10-23T05:56:07Z',
finishedAt: '2018-10-23T05:56:25Z',
- children: [
- 'job-cloneofhelloworldls94q-1-3667110102-3867833025'
- ],
- outboundNodes: [
- 'job-cloneofhelloworldls94q-1-3667110102-3918165882'
- ]
+ children: ['job-cloneofhelloworldls94q-1-3667110102-3867833025'],
+ outboundNodes: ['job-cloneofhelloworldls94q-1-3667110102-3918165882'],
},
'job-cloneofhelloworldls94q-1-3667110102-3817500168': {
id: 'job-cloneofhelloworldls94q-1-3667110102-3817500168',
@@ -177,13 +164,11 @@ export default {
parameters: [
{
name: 'message',
- value: 'hello world from node: B'
- }
- ]
+ value: 'hello world from node: B',
+ },
+ ],
},
- children: [
- 'job-cloneofhelloworldls94q-1-3667110102-3918165882'
- ]
+ children: ['job-cloneofhelloworldls94q-1-3667110102-3918165882'],
},
'job-cloneofhelloworldls94q-1-3667110102-3834277787': {
id: 'job-cloneofhelloworldls94q-1-3667110102-3834277787',
@@ -199,13 +184,11 @@ export default {
parameters: [
{
name: 'message',
- value: 'hello world from node: C'
- }
- ]
+ value: 'hello world from node: C',
+ },
+ ],
},
- children: [
- 'job-cloneofhelloworldls94q-1-3667110102-3918165882'
- ]
+ children: ['job-cloneofhelloworldls94q-1-3667110102-3918165882'],
},
'job-cloneofhelloworldls94q-1-3667110102-3867833025': {
id: 'job-cloneofhelloworldls94q-1-3667110102-3867833025',
@@ -221,14 +204,14 @@ export default {
parameters: [
{
name: 'message',
- value: 'hello world from node: A'
- }
- ]
+ value: 'hello world from node: A',
+ },
+ ],
},
children: [
'job-cloneofhelloworldls94q-1-3667110102-3817500168',
- 'job-cloneofhelloworldls94q-1-3667110102-3834277787'
- ]
+ 'job-cloneofhelloworldls94q-1-3667110102-3834277787',
+ ],
},
'job-cloneofhelloworldls94q-1-3667110102-3918165882': {
id: 'job-cloneofhelloworldls94q-1-3667110102-3918165882',
@@ -244,11 +227,11 @@ export default {
parameters: [
{
name: 'message',
- value: 'hello world from node: D'
- }
- ]
- }
- }
- }
- }
+ value: 'hello world from node: D',
+ },
+ ],
+ },
+ },
+ },
+ },
};
diff --git a/frontend/mock-backend/json-runtime.ts b/frontend/mock-backend/json-runtime.ts
index f4f8351e107..f68c4ff98e0 100644
--- a/frontend/mock-backend/json-runtime.ts
+++ b/frontend/mock-backend/json-runtime.ts
@@ -24,8 +24,8 @@ export default {
creationTimestamp: '2018-06-06T00:04:49Z',
labels: {
'workflows.argoproj.io/completed': 'true',
- 'workflows.argoproj.io/phase': 'Succeeded'
- }
+ 'workflows.argoproj.io/phase': 'Succeeded',
+ },
},
spec: {
templates: [
@@ -37,25 +37,21 @@ export default {
container: {
name: '',
image: 'docker/whalesay:latest',
- command: [
- 'cowsay'
- ],
- args: [
- '{{workflow.parameters.message}}'
- ],
- resources: {}
- }
- }
+ command: ['cowsay'],
+ args: ['{{workflow.parameters.message}}'],
+ resources: {},
+ },
+ },
],
entrypoint: 'whalesay1',
arguments: {
parameters: [
{
name: 'message',
- value: 'hello world'
- }
- ]
- }
+ value: 'hello world',
+ },
+ ],
+ },
},
status: {
phase: 'Succeeded',
@@ -76,24 +72,20 @@ export default {
{
name: 'JSON Data',
value: JSON.stringify({
- 'string1': 'a',
- 'string2': 'b',
- 'number1': 1,
- 'number2': 2.2,
- 'object': {
- 'string': 'a',
- 'number': 2
+ string1: 'a',
+ string2: 'b',
+ number1: 1,
+ number2: 2.2,
+ object: {
+ string: 'a',
+ number: 2,
},
- 'array': [
- 'a',
- 'b',
- 'c'
- ]
- })
- }
- ]
- }
- }
- }
- }
+ array: ['a', 'b', 'c'],
+ }),
+ },
+ ],
+ },
+ },
+ },
+ },
};
diff --git a/frontend/mock-backend/mock-api-middleware.ts b/frontend/mock-backend/mock-api-middleware.ts
index 477cf811cf6..892a6e977f2 100644
--- a/frontend/mock-backend/mock-api-middleware.ts
+++ b/frontend/mock-backend/mock-api-middleware.ts
@@ -53,7 +53,6 @@ interface BaseResource {
// tslint:disable-next-line:no-default-export
export default (app: express.Application) => {
-
app.use((req, _, next) => {
// tslint:disable-next-line:no-console
console.info(req.method + ' ' + req.originalUrl);
@@ -71,7 +70,7 @@ export default (app: express.Application) => {
apiServerCommitHash: 'd3c4add0a95e930c70a330466d0923827784eb9a',
apiServerReady: true,
buildDate: 'Wed Jan 9 19:40:24 UTC 2019',
- frontendCommitHash: '8efb2fcff9f666ba5b101647e909dc9c6889cecb'
+ frontendCommitHash: '8efb2fcff9f666ba5b101647e909dc9c6889cecb',
});
});
@@ -79,7 +78,10 @@ export default (app: express.Application) => {
res.sendStatus(200);
});
- function getSortKeyAndOrder(defaultSortKey: string, queryParam?: string): { desc: boolean, key: string } {
+ function getSortKeyAndOrder(
+ defaultSortKey: string,
+ queryParam?: string,
+ ): { desc: boolean; key: string } {
let key = defaultSortKey;
let desc = false;
@@ -88,8 +90,10 @@ export default (app: express.Application) => {
key = keyParts[0];
// Check that the key is properly formatted.
- if (keyParts.length > 2 ||
- (keyParts.length === 2 && keyParts[1] !== 'asc' && keyParts[1] !== 'desc')) {
+ if (
+ keyParts.length > 2 ||
+ (keyParts.length === 2 && keyParts[1] !== 'asc' && keyParts[1] !== 'desc')
+ ) {
throw new Error(`Invalid sort string: ${queryParam}`);
}
@@ -124,7 +128,7 @@ export default (app: express.Application) => {
return result * (desc ? -1 : 1);
});
- const start = (req.query.page_token ? +req.query.page_token : 0);
+ const start = req.query.page_token ? +req.query.page_token : 0;
const end = start + (+req.query.page_size || 20);
response.jobs = jobs.slice(start, end);
@@ -161,7 +165,7 @@ export default (app: express.Application) => {
return result * (desc ? -1 : 1);
});
- const start = (req.query.pageToken ? +req.query.pageToken : 0);
+ const start = req.query.pageToken ? +req.query.pageToken : 0;
const end = start + (+req.query.pageSize || 20);
response.experiments = experiments.slice(start, end);
@@ -186,10 +190,9 @@ export default (app: express.Application) => {
}, 1000);
});
-
app.get(v1beta1Prefix + '/experiments/:eid', (req, res) => {
res.header('Content-Type', 'application/json');
- const experiment = fixedData.experiments.find((exp) => exp.id === req.params.eid);
+ const experiment = fixedData.experiments.find(exp => exp.id === req.params.eid);
if (!experiment) {
res.status(404).send(`No experiment was found with ID: ${req.params.eid}`);
return;
@@ -227,7 +230,7 @@ export default (app: express.Application) => {
res.header('Content-Type', 'application/json');
switch (req.method) {
case 'DELETE':
- const i = fixedData.jobs.findIndex((j) => j.id === req.params.jid);
+ const i = fixedData.jobs.findIndex(j => j.id === req.params.jid);
if (fixedData.jobs[i].name!.startsWith('Cannot be deleted')) {
res.status(502).send(`Deletion failed for job: '${fixedData.jobs[i].name}'`);
} else {
@@ -237,7 +240,7 @@ export default (app: express.Application) => {
}
break;
case 'GET':
- const job = fixedData.jobs.find((j) => j.id === req.params.jid);
+ const job = fixedData.jobs.find(j => j.id === req.params.jid);
if (job) {
res.json(job);
} else {
@@ -257,15 +260,20 @@ export default (app: express.Application) => {
runs: [],
};
- let runs: ApiRun[] = fixedData.runs.map((r) => r.run!);
+ let runs: ApiRun[] = fixedData.runs.map(r => r.run!);
if (req.query.filter) {
runs = filterResources(runs, req.query.filter);
}
if (req.query['resource_reference_key.type'] === ApiResourceType.EXPERIMENT) {
- runs = runs.filter((r) => RunUtils.getAllExperimentReferences(r)
- .some((ref) => ref.key && ref.key.id && ref.key.id === req.query['resource_reference_key.id'] || false));
+ runs = runs.filter(r =>
+ RunUtils.getAllExperimentReferences(r).some(
+ ref =>
+ (ref.key && ref.key.id && ref.key.id === req.query['resource_reference_key.id']) ||
+ false,
+ ),
+ );
}
const { desc, key } = getSortKeyAndOrder(RunSortKeys.CREATED_AT, req.query.sort_by);
@@ -281,7 +289,7 @@ export default (app: express.Application) => {
return result * (desc ? -1 : 1);
});
- const start = (req.query.page_token ? +req.query.page_token : 0);
+ const start = req.query.page_token ? +req.query.page_token : 0;
const end = start + (+req.query.page_size || 20);
response.runs = runs.slice(start, end);
@@ -294,7 +302,7 @@ export default (app: express.Application) => {
app.get(v1beta1Prefix + '/runs/:rid', (req, res) => {
const rid = req.params.rid;
- const run = fixedData.runs.find((r) => r.run!.id === rid);
+ const run = fixedData.runs.find(r => r.run!.id === rid);
if (!run) {
res.status(404).send('Cannot find a run with id: ' + rid);
return;
@@ -327,8 +335,8 @@ export default (app: express.Application) => {
}
const runDetail = fixedData.runs.find(r => r.run!.id === req.params.rid);
if (runDetail) {
- runDetail.run!.storage_state = req.params.method === 'archive' ?
- RunStorageState.ARCHIVED : RunStorageState.AVAILABLE;
+ runDetail.run!.storage_state =
+ req.params.method === 'archive' ? RunStorageState.ARCHIVED : RunStorageState.AVAILABLE;
res.json({});
} else {
res.status(500).send('Cannot find a run with id ' + req.params.rid);
@@ -337,7 +345,7 @@ export default (app: express.Application) => {
app.post(v1beta1Prefix + '/jobs/:jid/enable', (req, res) => {
setTimeout(() => {
- const job = fixedData.jobs.find((j) => j.id === req.params.jid);
+ const job = fixedData.jobs.find(j => j.id === req.params.jid);
if (job) {
job.enabled = true;
res.json({});
@@ -349,7 +357,7 @@ export default (app: express.Application) => {
app.post(v1beta1Prefix + '/jobs/:jid/disable', (req, res) => {
setTimeout(() => {
- const job = fixedData.jobs.find((j) => j.id === req.params.jid);
+ const job = fixedData.jobs.find(j => j.id === req.params.jid);
if (job) {
job.enabled = false;
res.json({});
@@ -369,15 +377,22 @@ export default (app: express.Application) => {
switch (p.op) {
case PredicateOp.EQUALS:
if (p.key === 'name') {
- return r.name && r.name.toLocaleLowerCase() === (p.string_value || '').toLocaleLowerCase();
+ return (
+ r.name && r.name.toLocaleLowerCase() === (p.string_value || '').toLocaleLowerCase()
+ );
} else if (p.key === 'storage_state') {
- return (r as ApiRun).storage_state && (r as ApiRun).storage_state!.toString() === p.string_value;
+ return (
+ (r as ApiRun).storage_state &&
+ (r as ApiRun).storage_state!.toString() === p.string_value
+ );
} else {
throw new Error(`Key: ${p.key} is not yet supported by the mock API server`);
}
case PredicateOp.NOTEQUALS:
if (p.key === 'name') {
- return r.name && r.name.toLocaleLowerCase() !== (p.string_value || '').toLocaleLowerCase();
+ return (
+ r.name && r.name.toLocaleLowerCase() !== (p.string_value || '').toLocaleLowerCase()
+ );
} else if (p.key === 'storage_state') {
return ((r as ApiRun).storage_state || {}).toString() !== p.string_value;
} else {
@@ -387,7 +402,10 @@ export default (app: express.Application) => {
if (p.key !== 'name') {
throw new Error(`Key: ${p.key} is not yet supported by the mock API server`);
}
- return r.name && r.name.toLocaleLowerCase().includes((p.string_value || '').toLocaleLowerCase());
+ return (
+ r.name &&
+ r.name.toLocaleLowerCase().includes((p.string_value || '').toLocaleLowerCase())
+ );
case PredicateOp.NOTEQUALS:
// Fall through
case PredicateOp.GREATERTHAN:
@@ -432,7 +450,7 @@ export default (app: express.Application) => {
return result * (desc ? -1 : 1);
});
- const start = (req.query.page_token ? +req.query.page_token : 0);
+ const start = req.query.page_token ? +req.query.page_token : 0;
const end = start + (+req.query.page_size || 20);
response.pipelines = pipelines.slice(start, end);
@@ -445,7 +463,7 @@ export default (app: express.Application) => {
app.delete(v1beta1Prefix + '/pipelines/:pid', (req, res) => {
res.header('Content-Type', 'application/json');
- const i = fixedData.pipelines.findIndex((p) => p.id === req.params.pid);
+ const i = fixedData.pipelines.findIndex(p => p.id === req.params.pid);
if (i === -1) {
res.status(404).send(`No pipelines was found with ID: ${req.params.pid}`);
@@ -463,7 +481,7 @@ export default (app: express.Application) => {
app.get(v1beta1Prefix + '/pipelines/:pid', (req, res) => {
res.header('Content-Type', 'application/json');
- const pipeline = fixedData.pipelines.find((p) => p.id === req.params.pid);
+ const pipeline = fixedData.pipelines.find(p => p.id === req.params.pid);
if (!pipeline) {
res.status(404).send(`No pipeline was found with ID: ${req.params.pid}`);
return;
@@ -473,7 +491,7 @@ export default (app: express.Application) => {
app.get(v1beta1Prefix + '/pipelines/:pid/templates', (req, res) => {
res.header('Content-Type', 'text/x-yaml');
- const pipeline = fixedData.pipelines.find((p) => p.id === req.params.pid);
+ const pipeline = fixedData.pipelines.find(p => p.id === req.params.pid);
if (!pipeline) {
res.status(404).send(`No pipeline was found with ID: ${req.params.pid}`);
return;
@@ -492,9 +510,10 @@ export default (app: express.Application) => {
function mockCreatePipeline(res: Response, name: string, body?: any): void {
res.header('Content-Type', 'application/json');
// Don't allow uploading multiple pipelines with the same name
- if (fixedData.pipelines.find((p) => p.name === name)) {
- res.status(502).send(
- `A Pipeline named: "${name}" already exists. Please choose a different name.`);
+ if (fixedData.pipelines.find(p => p.name === name)) {
+ res
+ .status(502)
+ .send(`A Pipeline named: "${name}" already exists. Please choose a different name.`);
} else {
const pipeline = body || {};
pipeline.id = 'new-pipeline-' + (fixedData.pipelines.length + 1);
@@ -504,13 +523,13 @@ export default (app: express.Application) => {
'TODO: the mock middleware does not actually use the uploaded pipeline';
pipeline.parameters = [
{
- name: 'output'
+ name: 'output',
},
{
- name: 'param-1'
+ name: 'param-1',
},
{
- name: 'param-2'
+ name: 'param-2',
},
];
fixedData.pipelines.push(pipeline);
diff --git a/frontend/mock-backend/mock-coinflip-runtime.ts b/frontend/mock-backend/mock-coinflip-runtime.ts
index 6f7dc1e6869..e8d2d4abce4 100644
--- a/frontend/mock-backend/mock-coinflip-runtime.ts
+++ b/frontend/mock-backend/mock-coinflip-runtime.ts
@@ -24,8 +24,8 @@ export default {
creationTimestamp: '2018-04-17T20:58:23Z',
labels: {
'workflows.argoproj.io/completed': 'true',
- 'workflows.argoproj.io/phase': 'Succeeded'
- }
+ 'workflows.argoproj.io/phase': 'Succeeded',
+ },
},
spec: {
templates: [
@@ -39,24 +39,24 @@ export default {
{
name: 'flip-coin',
template: 'flip-coin',
- arguments: {}
- }
+ arguments: {},
+ },
],
[
{
name: 'heads',
template: 'heads',
arguments: {},
- when: '{{steps.flip-coin.outputs.result}} == heads'
+ when: '{{steps.flip-coin.outputs.result}} == heads',
},
{
name: 'tails',
template: 'coinflip',
arguments: {},
- when: '{{steps.flip-coin.outputs.result}} == tails'
- }
- ]
- ]
+ when: '{{steps.flip-coin.outputs.result}} == tails',
+ },
+ ],
+ ],
},
{
name: 'flip-coin',
@@ -66,13 +66,12 @@ export default {
script: {
name: '',
image: 'python:alpine3.6',
- command: [
- 'python'
- ],
+ command: ['python'],
resources: {},
// tslint:disable-next-line:max-line-length
- source: 'import random\nresult = "heads" if random.randint(0,1) == 0 else "tails"\nprint(result)\n'
- }
+ source:
+ 'import random\nresult = "heads" if random.randint(0,1) == 0 else "tails"\nprint(result)\n',
+ },
},
{
name: 'heads',
@@ -82,30 +81,25 @@ export default {
container: {
name: '',
image: 'alpine:3.6',
- command: [
- 'sh',
- '-c'
- ],
- args: [
- 'echo "it was heads"'
- ],
- resources: {}
- }
- }
+ command: ['sh', '-c'],
+ args: ['echo "it was heads"'],
+ resources: {},
+ },
+ },
],
entrypoint: 'coinflip',
arguments: {
parameters: [
{
name: 'x',
- value: 10
+ value: 10,
},
{
name: 'y',
- value: 20
- }
- ]
- }
+ value: 20,
+ },
+ ],
+ },
},
status: {
phase: 'Succeeded',
@@ -121,78 +115,74 @@ export default {
phase: 'Succeeded',
startedAt: '2018-04-17T20:58:23Z',
finishedAt: '2018-04-17T20:58:38Z',
- children: [
- 'coinflip-recursive-q7dqb-1787723858',
- 'coinflip-recursive-q7dqb-1720466287'
- ],
- outboundNodes: [
- 'coinflip-recursive-q7dqb-3721646052'
- ]
+ children: ['coinflip-recursive-q7dqb-1787723858', 'coinflip-recursive-q7dqb-1720466287'],
+ outboundNodes: ['coinflip-recursive-q7dqb-3721646052'],
},
'coinflip-recursive-q7dqb-1720466287': {
id: 'coinflip-recursive-q7dqb-1720466287',
name: 'coinflip-recursive-q7dqb[1]',
displayName: '[1]',
outputs: {
- artifacts: [{
- name: 'mlpipeline-ui-metadata',
- s3: {
- bucket: 'somebucket',
- key: 'staging',
+ artifacts: [
+ {
+ name: 'mlpipeline-ui-metadata',
+ s3: {
+ bucket: 'somebucket',
+ key: 'staging',
+ },
},
- }],
+ ],
},
type: 'StepGroup',
phase: 'Succeeded',
boundaryID: 'coinflip-recursive-q7dqb',
startedAt: '2018-04-17T20:58:28Z',
finishedAt: '2018-04-17T20:58:38Z',
- children: [
- 'coinflip-recursive-q7dqb-4011569486',
- 'coinflip-recursive-q7dqb-3266226990'
- ]
+ children: ['coinflip-recursive-q7dqb-4011569486', 'coinflip-recursive-q7dqb-3266226990'],
},
'coinflip-recursive-q7dqb-1787723858': {
id: 'coinflip-recursive-q7dqb-1787723858',
name: 'coinflip-recursive-q7dqb[0]',
displayName: '[0]',
outputs: {
- artifacts: [{
- name: 'mlpipeline-ui-metadata',
- s3: {
- bucket: 'somebucket',
- key: 'analysis2',
+ artifacts: [
+ {
+ name: 'mlpipeline-ui-metadata',
+ s3: {
+ bucket: 'somebucket',
+ key: 'analysis2',
+ },
},
- }],
+ ],
},
type: 'StepGroup',
phase: 'Succeeded',
boundaryID: 'coinflip-recursive-q7dqb',
startedAt: '2018-04-17T20:58:23Z',
finishedAt: '2018-04-17T20:58:28Z',
- children: [
- 'coinflip-recursive-q7dqb-311338607'
- ]
+ children: ['coinflip-recursive-q7dqb-311338607'],
},
'coinflip-recursive-q7dqb-2934726852': {
id: 'coinflip-recursive-q7dqb-2934726852',
name: 'coinflip-recursive-q7dqb[1].tails[1].tails',
displayName: 'tails',
outputs: {
- artifacts: [{
- name: 'mlpipeline-ui-metadata',
- s3: {
- bucket: 'somebucket',
- key: 'transform',
+ artifacts: [
+ {
+ name: 'mlpipeline-ui-metadata',
+ s3: {
+ bucket: 'somebucket',
+ key: 'transform',
+ },
},
- }],
+ ],
},
type: 'Skipped',
phase: 'Skipped',
boundaryID: 'coinflip-recursive-q7dqb-3266226990',
- message: 'when \'heads == tails\' evaluated false',
+ message: "when 'heads == tails' evaluated false",
startedAt: '2018-04-17T20:58:34Z',
- finishedAt: '2018-04-17T20:58:34Z'
+ finishedAt: '2018-04-17T20:58:34Z',
},
'coinflip-recursive-q7dqb-311338607': {
id: 'coinflip-recursive-q7dqb-311338607',
@@ -205,23 +195,23 @@ export default {
startedAt: '2018-04-17T20:58:23Z',
finishedAt: '2018-04-17T20:58:28Z',
outputs: {
- artifacts: [{
- name: 'mlpipeline-ui-metadata',
- s3: {
- bucket: 'somebucket',
- key: 'model2',
+ artifacts: [
+ {
+ name: 'mlpipeline-ui-metadata',
+ s3: {
+ bucket: 'somebucket',
+ key: 'model2',
+ },
},
- }],
+ ],
parameters: [
{
name: 'result',
- value: 'tails'
- }
- ]
+ value: 'tails',
+ },
+ ],
},
- children: [
- 'coinflip-recursive-q7dqb-1720466287'
- ]
+ children: ['coinflip-recursive-q7dqb-1720466287'],
},
'coinflip-recursive-q7dqb-3266226990': {
id: 'coinflip-recursive-q7dqb-3266226990',
@@ -233,13 +223,8 @@ export default {
boundaryID: 'coinflip-recursive-q7dqb',
startedAt: '2018-04-17T20:58:28Z',
finishedAt: '2018-04-17T20:58:38Z',
- children: [
- 'coinflip-recursive-q7dqb-4010083248',
- 'coinflip-recursive-q7dqb-855846949'
- ],
- outboundNodes: [
- 'coinflip-recursive-q7dqb-3721646052'
- ]
+ children: ['coinflip-recursive-q7dqb-4010083248', 'coinflip-recursive-q7dqb-855846949'],
+ outboundNodes: ['coinflip-recursive-q7dqb-3721646052'],
},
'coinflip-recursive-q7dqb-3466727817': {
id: 'coinflip-recursive-q7dqb-3466727817',
@@ -255,13 +240,11 @@ export default {
parameters: [
{
name: 'result',
- value: 'heads'
- }
- ]
+ value: 'heads',
+ },
+ ],
},
- children: [
- 'coinflip-recursive-q7dqb-855846949'
- ]
+ children: ['coinflip-recursive-q7dqb-855846949'],
},
'coinflip-recursive-q7dqb-3721646052': {
id: 'coinflip-recursive-q7dqb-3721646052',
@@ -272,7 +255,7 @@ export default {
phase: 'Succeeded',
boundaryID: 'coinflip-recursive-q7dqb-3266226990',
startedAt: '2018-04-17T20:58:34Z',
- finishedAt: '2018-04-17T20:58:37Z'
+ finishedAt: '2018-04-17T20:58:37Z',
},
'coinflip-recursive-q7dqb-4010083248': {
id: 'coinflip-recursive-q7dqb-4010083248',
@@ -283,9 +266,7 @@ export default {
boundaryID: 'coinflip-recursive-q7dqb-3266226990',
startedAt: '2018-04-17T20:58:28Z',
finishedAt: '2018-04-17T20:58:34Z',
- children: [
- 'coinflip-recursive-q7dqb-3466727817'
- ]
+ children: ['coinflip-recursive-q7dqb-3466727817'],
},
'coinflip-recursive-q7dqb-4011569486': {
id: 'coinflip-recursive-q7dqb-4011569486',
@@ -294,9 +275,9 @@ export default {
type: 'Skipped',
phase: 'Skipped',
boundaryID: 'coinflip-recursive-q7dqb',
- message: 'when \'tails == heads\' evaluated false',
+ message: "when 'tails == heads' evaluated false",
startedAt: '2018-04-17T20:58:28Z',
- finishedAt: '2018-04-17T20:58:28Z'
+ finishedAt: '2018-04-17T20:58:28Z',
},
'coinflip-recursive-q7dqb-855846949': {
id: 'coinflip-recursive-q7dqb-855846949',
@@ -307,11 +288,8 @@ export default {
boundaryID: 'coinflip-recursive-q7dqb-3266226990',
startedAt: '2018-04-17T20:58:34Z',
finishedAt: '2018-04-17T20:58:38Z',
- children: [
- 'coinflip-recursive-q7dqb-3721646052',
- 'coinflip-recursive-q7dqb-2934726852'
- ]
- }
- }
- }
+ children: ['coinflip-recursive-q7dqb-3721646052', 'coinflip-recursive-q7dqb-2934726852'],
+ },
+ },
+ },
};
diff --git a/frontend/mock-backend/mock-error-runtime.ts b/frontend/mock-backend/mock-error-runtime.ts
index bc46971dbfb..05c93e03f0e 100644
--- a/frontend/mock-backend/mock-error-runtime.ts
+++ b/frontend/mock-backend/mock-error-runtime.ts
@@ -18,14 +18,15 @@ export default {
name: 'coinflip-error-nklng2',
namespace: 'default',
// tslint:disable-next-line:max-line-length
- selfLink: '/apis/argoproj.io/v1alpha1/namespaces/default/workflows/coinflip-heads-c085010d-771a-4cdf-979c-257e991501b5',
+ selfLink:
+ '/apis/argoproj.io/v1alpha1/namespaces/default/workflows/coinflip-heads-c085010d-771a-4cdf-979c-257e991501b5',
uid: '47a3d09c-7db4-4788-ac55-3f8d908574aa',
resourceVersion: '10527150',
creationTimestamp: '2018-06-11T22:49:26Z',
labels: {
'workflows.argoproj.io/completed': 'true',
- 'workflows.argoproj.io/phase': 'Failed'
- }
+ 'workflows.argoproj.io/phase': 'Failed',
+ },
},
spec: {
templates: [
@@ -40,10 +41,10 @@ export default {
name: 'heads',
template: 'heads',
arguments: {},
- when: '{{steps.flip-coin.outputs.result}} == heads'
- }
- ]
- ]
+ when: '{{steps.flip-coin.outputs.result}} == heads',
+ },
+ ],
+ ],
},
{
name: 'heads',
@@ -53,25 +54,21 @@ export default {
container: {
name: '',
image: 'alpine:3.6',
- command: [
- 'sh',
- '-c'
- ],
- args: [
- 'echo "it was heads"'
- ],
- resources: {}
- }
- }
+ command: ['sh', '-c'],
+ args: ['echo "it was heads"'],
+ resources: {},
+ },
+ },
],
entrypoint: 'coinflip',
- arguments: {}
+ arguments: {},
},
status: {
phase: 'Failed',
startedAt: '2018-06-11T22:49:26Z',
finishedAt: '2018-06-11T22:49:26Z',
// tslint:disable-next-line:max-line-length
- message: 'invalid spec: templates.coinflip.steps[0].heads failed to resolve {{steps.flip-coin.outputs.result}}'
- }
+ message:
+ 'invalid spec: templates.coinflip.steps[0].heads failed to resolve {{steps.flip-coin.outputs.result}}',
+ },
};
diff --git a/frontend/mock-backend/mock-xgboost-runtime.ts b/frontend/mock-backend/mock-xgboost-runtime.ts
index 928dac43c02..a25784eadc5 100644
--- a/frontend/mock-backend/mock-xgboost-runtime.ts
+++ b/frontend/mock-backend/mock-xgboost-runtime.ts
@@ -24,8 +24,8 @@ export default {
creationTimestamp: '2018-04-16T23:37:48Z',
labels: {
'workflows.argoproj.io/completed': 'true',
- 'workflows.argoproj.io/phase': 'Succeeded'
- }
+ 'workflows.argoproj.io/phase': 'Succeeded',
+ },
},
spec: {
templates: [
@@ -34,15 +34,15 @@ export default {
inputs: {
parameters: [
{
- name: 'project'
+ name: 'project',
},
{
- name: 'region'
+ name: 'region',
},
{
- name: 'cluster'
- }
- ]
+ name: 'cluster',
+ },
+ ],
},
outputs: {},
metadata: {},
@@ -55,63 +55,63 @@ export default {
parameters: [
{
name: 'project',
- value: '{{inputs.parameters.project}}'
+ value: '{{inputs.parameters.project}}',
},
{
name: 'region',
- value: '{{inputs.parameters.region}}'
+ value: '{{inputs.parameters.region}}',
},
{
name: 'name',
- value: '{{inputs.parameters.cluster}}'
- }
- ]
- }
- }
- ]
- ]
+ value: '{{inputs.parameters.cluster}}',
+ },
+ ],
+ },
+ },
+ ],
+ ],
},
{
name: 'xgboost-training',
inputs: {
parameters: [
{
- name: 'project'
+ name: 'project',
},
{
- name: 'region'
+ name: 'region',
},
{
- name: 'cluster'
+ name: 'cluster',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'train'
+ name: 'train',
},
{
- name: 'eval'
+ name: 'eval',
},
{
- name: 'schema'
+ name: 'schema',
},
{
- name: 'target'
+ name: 'target',
},
{
- name: 'package'
+ name: 'package',
},
{
- name: 'workers'
+ name: 'workers',
},
{
- name: 'rounds'
+ name: 'rounds',
},
{
- name: 'conf'
- }
- ]
+ name: 'conf',
+ },
+ ],
},
outputs: {},
metadata: {},
@@ -124,23 +124,23 @@ export default {
parameters: [
{
name: 'project',
- value: '{{inputs.parameters.project}}'
+ value: '{{inputs.parameters.project}}',
},
{
name: 'region',
- value: '{{inputs.parameters.region}}'
+ value: '{{inputs.parameters.region}}',
},
{
name: 'name',
- value: '{{inputs.parameters.cluster}}'
+ value: '{{inputs.parameters.cluster}}',
},
{
name: 'staging',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/staging'
- }
- ]
- }
- }
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/staging',
+ },
+ ],
+ },
+ },
],
[
{
@@ -150,31 +150,31 @@ export default {
parameters: [
{
name: 'project',
- value: '{{inputs.parameters.project}}'
+ value: '{{inputs.parameters.project}}',
},
{
name: 'region',
- value: '{{inputs.parameters.region}}'
+ value: '{{inputs.parameters.region}}',
},
{
name: 'cluster',
- value: '{{inputs.parameters.cluster}}'
+ value: '{{inputs.parameters.cluster}}',
},
{
name: 'output',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis',
},
{
name: 'train',
- value: '{{inputs.parameters.train}}'
+ value: '{{inputs.parameters.train}}',
},
{
name: 'schema',
- value: '{{inputs.parameters.schema}}'
- }
- ]
- }
- }
+ value: '{{inputs.parameters.schema}}',
+ },
+ ],
+ },
+ },
],
[
{
@@ -184,39 +184,39 @@ export default {
parameters: [
{
name: 'project',
- value: '{{inputs.parameters.project}}'
+ value: '{{inputs.parameters.project}}',
},
{
name: 'region',
- value: '{{inputs.parameters.region}}'
+ value: '{{inputs.parameters.region}}',
},
{
name: 'cluster',
- value: '{{inputs.parameters.cluster}}'
+ value: '{{inputs.parameters.cluster}}',
},
{
name: 'output',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/transform'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/transform',
},
{
name: 'train',
- value: '{{inputs.parameters.train}}'
+ value: '{{inputs.parameters.train}}',
},
{
name: 'eval',
- value: '{{inputs.parameters.eval}}'
+ value: '{{inputs.parameters.eval}}',
},
{
name: 'target',
- value: '{{inputs.parameters.target}}'
+ value: '{{inputs.parameters.target}}',
},
{
name: 'analysis',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis'
- }
- ]
- }
- }
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis',
+ },
+ ],
+ },
+ },
],
[
{
@@ -226,55 +226,55 @@ export default {
parameters: [
{
name: 'project',
- value: '{{inputs.parameters.project}}'
+ value: '{{inputs.parameters.project}}',
},
{
name: 'region',
- value: '{{inputs.parameters.region}}'
+ value: '{{inputs.parameters.region}}',
},
{
name: 'cluster',
- value: '{{inputs.parameters.cluster}}'
+ value: '{{inputs.parameters.cluster}}',
},
{
name: 'output',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/model'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/model',
},
{
name: 'train',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/transform/train/part-*'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/transform/train/part-*',
},
{
name: 'eval',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/transform/eval/part-*'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/transform/eval/part-*',
},
{
name: 'target',
- value: '{{inputs.parameters.target}}'
+ value: '{{inputs.parameters.target}}',
},
{
name: 'analysis',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis',
},
{
name: 'package',
- value: '{{inputs.parameters.package}}'
+ value: '{{inputs.parameters.package}}',
},
{
name: 'workers',
- value: '{{inputs.parameters.workers}}'
+ value: '{{inputs.parameters.workers}}',
},
{
name: 'rounds',
- value: '{{inputs.parameters.rounds}}'
+ value: '{{inputs.parameters.rounds}}',
},
{
name: 'conf',
- value: '{{inputs.parameters.conf}}'
- }
- ]
- }
- }
+ value: '{{inputs.parameters.conf}}',
+ },
+ ],
+ },
+ },
],
[
{
@@ -284,43 +284,43 @@ export default {
parameters: [
{
name: 'project',
- value: '{{inputs.parameters.project}}'
+ value: '{{inputs.parameters.project}}',
},
{
name: 'region',
- value: '{{inputs.parameters.region}}'
+ value: '{{inputs.parameters.region}}',
},
{
name: 'cluster',
- value: '{{inputs.parameters.cluster}}'
+ value: '{{inputs.parameters.cluster}}',
},
{
name: 'output',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/batchpredict'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/batchpredict',
},
{
name: 'eval',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/transform/eval/part-*'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/transform/eval/part-*',
},
{
name: 'target',
- value: '{{inputs.parameters.target}}'
+ value: '{{inputs.parameters.target}}',
},
{
name: 'analysis',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis',
},
{
name: 'package',
- value: '{{inputs.parameters.package}}'
+ value: '{{inputs.parameters.package}}',
},
{
name: 'model',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/model'
- }
- ]
- }
- }
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/model',
+ },
+ ],
+ },
+ },
],
[
{
@@ -330,383 +330,363 @@ export default {
parameters: [
{
name: 'output',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/confusionmatrix'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/confusionmatrix',
},
{
name: 'predictions',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/batchpredict/part-*.csv'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/batchpredict/part-*.csv',
},
{
name: 'analysis',
- value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis'
+ value: '{{inputs.parameters.output}}/{{workflow.name}}/analysis',
},
{
name: 'target',
- value: '{{inputs.parameters.target}}'
- }
- ]
- }
- }
- ]
- ]
+ value: '{{inputs.parameters.target}}',
+ },
+ ],
+ },
+ },
+ ],
+ ],
},
{
name: 'createcluster',
inputs: {
parameters: [
{
- name: 'project'
+ name: 'project',
},
{
- name: 'region'
+ name: 'region',
},
{
- name: 'name'
+ name: 'name',
},
{
- name: 'staging'
- }
- ]
+ name: 'staging',
+ },
+ ],
},
outputs: {},
metadata: {},
container: {
name: '',
image: 'gcr.io/ml-pipeline/ml-pipeline-dataproc-xgboost',
- command: [
- 'sh',
- '-c'
- ],
+ command: ['sh', '-c'],
args: [
'python /ml/create_cluster.py --project {{inputs.parameters.project}} ' +
- '--region {{inputs.parameters.region}} --name {{inputs.parameters.name}} ' +
- '--staging {{inputs.parameters.staging}}'
+ '--region {{inputs.parameters.region}} --name {{inputs.parameters.name}} ' +
+ '--staging {{inputs.parameters.staging}}',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'analyze',
inputs: {
parameters: [
{
- name: 'project'
+ name: 'project',
},
{
- name: 'region'
+ name: 'region',
},
{
- name: 'cluster'
+ name: 'cluster',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'train'
+ name: 'train',
},
{
- name: 'schema'
- }
- ]
+ name: 'schema',
+ },
+ ],
},
outputs: {},
metadata: {},
container: {
name: '',
image: 'gcr.io/ml-pipeline/ml-pipeline-dataproc-xgboost',
- command: [
- 'sh',
- '-c'
- ],
+ command: ['sh', '-c'],
args: [
'python /ml/analyze.py --project {{inputs.parameters.project}} ' +
- '--region {{inputs.parameters.region}} --cluster ' +
- '{{inputs.parameters.cluster}} --output ' +
- '{{inputs.parameters.output}} --train {{inputs.parameters.train}} ' +
- '--schema {{inputs.parameters.schema}}'
+ '--region {{inputs.parameters.region}} --cluster ' +
+ '{{inputs.parameters.cluster}} --output ' +
+ '{{inputs.parameters.output}} --train {{inputs.parameters.train}} ' +
+ '--schema {{inputs.parameters.schema}}',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'transform',
inputs: {
parameters: [
{
- name: 'project'
+ name: 'project',
},
{
- name: 'region'
+ name: 'region',
},
{
- name: 'cluster'
+ name: 'cluster',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'train'
+ name: 'train',
},
{
- name: 'eval'
+ name: 'eval',
},
{
- name: 'target'
+ name: 'target',
},
{
- name: 'analysis'
- }
- ]
+ name: 'analysis',
+ },
+ ],
},
outputs: {},
metadata: {},
container: {
name: '',
image: 'gcr.io/ml-pipeline/ml-pipeline-dataproc-xgboost',
- command: [
- 'sh',
- '-c'
- ],
+ command: ['sh', '-c'],
args: [
'python /ml/transform.py --project {{inputs.parameters.project}} ' +
- '--region {{inputs.parameters.region}} --cluster ' +
- '{{inputs.parameters.cluster}} --output ' +
- '{{inputs.parameters.output}} --train {{inputs.parameters.train}} ' +
- '--eval {{inputs.parameters.eval}} --target ' +
- '{{inputs.parameters.target}} --analysis ' +
- '{{inputs.parameters.analysis}}'
+ '--region {{inputs.parameters.region}} --cluster ' +
+ '{{inputs.parameters.cluster}} --output ' +
+ '{{inputs.parameters.output}} --train {{inputs.parameters.train}} ' +
+ '--eval {{inputs.parameters.eval}} --target ' +
+ '{{inputs.parameters.target}} --analysis ' +
+ '{{inputs.parameters.analysis}}',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'train',
inputs: {
parameters: [
{
- name: 'project'
+ name: 'project',
},
{
- name: 'region'
+ name: 'region',
},
{
- name: 'cluster'
+ name: 'cluster',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'train'
+ name: 'train',
},
{
- name: 'eval'
+ name: 'eval',
},
{
- name: 'target'
+ name: 'target',
},
{
- name: 'analysis'
+ name: 'analysis',
},
{
- name: 'package'
+ name: 'package',
},
{
- name: 'workers'
+ name: 'workers',
},
{
- name: 'rounds'
+ name: 'rounds',
},
{
- name: 'conf'
- }
- ]
+ name: 'conf',
+ },
+ ],
},
outputs: {},
metadata: {},
container: {
name: '',
image: 'gcr.io/ml-pipeline/ml-pipeline-dataproc-xgboost',
- command: [
- 'sh',
- '-c'
- ],
+ command: ['sh', '-c'],
args: [
// tslint:disable-next-line:max-line-length
- 'python /ml/train.py --project {{inputs.parameters.project}} --region {{inputs.parameters.region}} --cluster {{inputs.parameters.cluster}} --output {{inputs.parameters.output}} --train {{inputs.parameters.train}} --eval {{inputs.parameters.eval}} --target {{inputs.parameters.target}} --analysis {{inputs.parameters.analysis}} --package {{inputs.parameters.package}} --workers {{inputs.parameters.workers}} --rounds {{inputs.parameters.rounds}} --conf {{inputs.parameters.conf}}'
+ 'python /ml/train.py --project {{inputs.parameters.project}} --region {{inputs.parameters.region}} --cluster {{inputs.parameters.cluster}} --output {{inputs.parameters.output}} --train {{inputs.parameters.train}} --eval {{inputs.parameters.eval}} --target {{inputs.parameters.target}} --analysis {{inputs.parameters.analysis}} --package {{inputs.parameters.package}} --workers {{inputs.parameters.workers}} --rounds {{inputs.parameters.rounds}} --conf {{inputs.parameters.conf}}',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'batchpredict',
inputs: {
parameters: [
{
- name: 'project'
+ name: 'project',
},
{
- name: 'region'
+ name: 'region',
},
{
- name: 'cluster'
+ name: 'cluster',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'eval'
+ name: 'eval',
},
{
- name: 'model'
+ name: 'model',
},
{
- name: 'target'
+ name: 'target',
},
{
- name: 'package'
+ name: 'package',
},
{
- name: 'analysis'
- }
- ]
+ name: 'analysis',
+ },
+ ],
},
outputs: {},
metadata: {},
container: {
name: '',
image: 'gcr.io/ml-pipeline/ml-pipeline-dataproc-xgboost',
- command: [
- 'sh',
- '-c'
- ],
+ command: ['sh', '-c'],
args: [
// tslint:disable-next-line:max-line-length
- 'python /ml/predict.py --project {{inputs.parameters.project}} --region {{inputs.parameters.region}} --cluster {{inputs.parameters.cluster}} --output {{inputs.parameters.output}} --predict {{inputs.parameters.eval}} --analysis {{inputs.parameters.analysis}} --target {{inputs.parameters.target}} --model {{inputs.parameters.model}} --package {{inputs.parameters.package}} '
+ 'python /ml/predict.py --project {{inputs.parameters.project}} --region {{inputs.parameters.region}} --cluster {{inputs.parameters.cluster}} --output {{inputs.parameters.output}} --predict {{inputs.parameters.eval}} --analysis {{inputs.parameters.analysis}} --target {{inputs.parameters.target}} --model {{inputs.parameters.model}} --package {{inputs.parameters.package}} ',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'confusionmatrix',
inputs: {
parameters: [
{
- name: 'output'
+ name: 'output',
},
{
- name: 'analysis'
+ name: 'analysis',
},
{
- name: 'predictions'
+ name: 'predictions',
},
{
- name: 'target'
- }
- ]
+ name: 'target',
+ },
+ ],
},
outputs: {},
metadata: {},
container: {
name: '',
image: 'gcr.io/ml-pipeline/ml-pipeline-local',
- command: [
- 'sh',
- '-c'
- ],
+ command: ['sh', '-c'],
args: [
// tslint:disable-next-line:max-line-length
- 'python /ml/confusion_matrix.py --output {{inputs.parameters.output}} --predictions {{inputs.parameters.predictions}} --analysis {{inputs.parameters.analysis}} --target {{inputs.parameters.target}}'
+ 'python /ml/confusion_matrix.py --output {{inputs.parameters.output}} --predictions {{inputs.parameters.predictions}} --analysis {{inputs.parameters.analysis}} --target {{inputs.parameters.target}}',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'deletecluster',
inputs: {
parameters: [
{
- name: 'project'
+ name: 'project',
},
{
- name: 'region'
+ name: 'region',
},
{
- name: 'name'
- }
- ]
+ name: 'name',
+ },
+ ],
},
outputs: {},
metadata: {},
container: {
name: '',
image: 'gcr.io/ml-pipeline/ml-pipeline-dataproc-xgboost',
- command: [
- 'sh',
- '-c'
- ],
+ command: ['sh', '-c'],
args: [
// tslint:disable-next-line:max-line-length
- 'python /ml/delete_cluster.py --project {{inputs.parameters.project}} --region {{inputs.parameters.region}} --name {{inputs.parameters.name}}'
+ 'python /ml/delete_cluster.py --project {{inputs.parameters.project}} --region {{inputs.parameters.region}} --name {{inputs.parameters.name}}',
],
- resources: {}
- }
- }
+ resources: {},
+ },
+ },
],
entrypoint: 'xgboost-training',
arguments: {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'cluster',
- value: 'xgboost-spark-{{workflow.name}}'
+ value: 'xgboost-spark-{{workflow.name}}',
},
{
name: 'output',
- value: 'gs://sample-xgbbost-cm-output'
+ value: 'gs://sample-xgbbost-cm-output',
},
{
name: 'train',
- value: 'gs://ml-pipeline-playground/newsgroup/train.csv'
+ value: 'gs://ml-pipeline-playground/newsgroup/train.csv',
},
{
name: 'eval',
- value: 'gs://ml-pipeline-playground/newsgroup/eval.csv'
+ value: 'gs://ml-pipeline-playground/newsgroup/eval.csv',
},
{
name: 'schema',
- value: 'gs://ml-pipeline-playground/newsgroup/schema.json'
+ value: 'gs://ml-pipeline-playground/newsgroup/schema.json',
},
{
name: 'target',
- value: 'news_label'
+ value: 'news_label',
},
{
name: 'package',
- // tslint:disable-next-line:max-line-length
- value: 'gs://ml-pipeline-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar'
+ // tslint:disable-next-line:max-line-length
+ value:
+ 'gs://ml-pipeline-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar',
},
{
name: 'workers',
- value: '2'
+ value: '2',
},
{
name: 'rounds',
- value: '200'
+ value: '200',
},
{
name: 'conf',
- value: 'gs://ml-pipeline-playground/trainconfcla.json '
- }
- ]
+ value: 'gs://ml-pipeline-playground/trainconfcla.json ',
+ },
+ ],
},
- onExit: 'exit-handler'
+ onExit: 'exit-handler',
},
status: {
phase: 'Succeeded',
@@ -726,54 +706,55 @@ export default {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'cluster',
- value: 'xgboost-spark-xgboost-training-gzkm9'
+ value: 'xgboost-spark-xgboost-training-gzkm9',
},
{
name: 'output',
- value: 'gs://sample-xgbbost-cm-output'
+ value: 'gs://sample-xgbbost-cm-output',
},
{
name: 'train',
- value: 'gs://ml-pipeline-playground/newsgroup/train.csv'
+ value: 'gs://ml-pipeline-playground/newsgroup/train.csv',
},
{
name: 'eval',
- value: 'gs://ml-pipeline-playground/newsgroup/eval.csv'
+ value: 'gs://ml-pipeline-playground/newsgroup/eval.csv',
},
{
name: 'schema',
- value: 'gs://ml-pipeline-playground/newsgroup/schema.json'
+ value: 'gs://ml-pipeline-playground/newsgroup/schema.json',
},
{
name: 'target',
- value: 'news_label'
+ value: 'news_label',
},
{
name: 'package',
- // tslint:disable-next-line:max-line-length
- value: 'gs://ml-pipeline-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar'
+ // tslint:disable-next-line:max-line-length
+ value:
+ 'gs://ml-pipeline-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar',
},
{
name: 'workers',
- value: '2'
+ value: '2',
},
{
name: 'rounds',
- value: '200'
+ value: '200',
},
{
name: 'conf',
- value: 'gs://ml-pipeline-playground/trainconfcla.json '
- }
- ]
+ value: 'gs://ml-pipeline-playground/trainconfcla.json ',
+ },
+ ],
},
children: [
'xgboost-training-gzkm9-4204210601',
@@ -781,11 +762,9 @@ export default {
'xgboost-training-gzkm9-915503087',
'xgboost-training-gzkm9-982760658',
'xgboost-training-gzkm9-4204798981',
- 'xgboost-training-gzkm9-916635920'
+ 'xgboost-training-gzkm9-916635920',
],
- outboundNodes: [
- 'xgboost-training-gzkm9-2203328319'
- ]
+ outboundNodes: ['xgboost-training-gzkm9-2203328319'],
},
'xgboost-training-gzkm9-1253553084': {
id: 'xgboost-training-gzkm9-1253553084',
@@ -794,31 +773,28 @@ export default {
type: 'Steps',
templateName: 'exit-handler',
phase: 'Pending',
- message: 'ImagePullBackOff: Back-off pulling image "gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster"',
+ message:
+ 'ImagePullBackOff: Back-off pulling image "gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster"',
startedAt: '2018-04-17T00:10:06Z',
finishedAt: '2018-04-17T00:12:01Z',
inputs: {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'cluster',
- value: 'xgboost-spark-xgboost-training-gzkm9'
- }
- ]
+ value: 'xgboost-spark-xgboost-training-gzkm9',
+ },
+ ],
},
- children: [
- 'xgboost-training-gzkm9-3439262870'
- ],
- outboundNodes: [
- 'xgboost-training-gzkm9-3721733163'
- ]
+ children: ['xgboost-training-gzkm9-3439262870'],
+ outboundNodes: ['xgboost-training-gzkm9-3721733163'],
},
'xgboost-training-gzkm9-1761585008': {
id: 'xgboost-training-gzkm9-1761585008',
@@ -834,46 +810,45 @@ export default {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'cluster',
- value: 'xgboost-spark-xgboost-training-gzkm9'
+ value: 'xgboost-spark-xgboost-training-gzkm9',
},
{
name: 'output',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/batchpredict'
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/batchpredict',
},
{
name: 'eval',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/transform/eval/part-*'
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/transform/eval/part-*',
},
{
name: 'model',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/model'
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/model',
},
{
name: 'target',
- value: 'news_label'
+ value: 'news_label',
},
{
name: 'package',
- // tslint:disable-next-line:max-line-length
- value: 'gs://ml-pipeline-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar'
+ // tslint:disable-next-line:max-line-length
+ value:
+ 'gs://ml-pipeline-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar',
},
{
name: 'analysis',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/analysis'
- }
- ]
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/analysis',
+ },
+ ],
},
- children: [
- 'xgboost-training-gzkm9-916635920'
- ]
+ children: ['xgboost-training-gzkm9-916635920'],
},
'xgboost-training-gzkm9-2203328319': {
id: 'xgboost-training-gzkm9-2203328319',
@@ -890,26 +865,28 @@ export default {
parameters: [
{
name: 'analysis',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/analysis'
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/analysis',
},
{
name: 'predictions',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/batchpredict/part-*.csv'
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/batchpredict/part-*.csv',
},
{
name: 'target',
- value: 'news_label'
- }
- ]
+ value: 'news_label',
+ },
+ ],
},
outputs: {
- artifacts: [{
- name: 'mlpipeline-ui-metadata',
- s3: {
- bucket: 'somebucket',
- key: 'confusionmatrix',
+ artifacts: [
+ {
+ name: 'mlpipeline-ui-metadata',
+ s3: {
+ bucket: 'somebucket',
+ key: 'confusionmatrix',
+ },
},
- }],
+ ],
},
},
'xgboost-training-gzkm9-2365787662': {
@@ -926,63 +903,64 @@ export default {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'cluster',
- value: 'xgboost-spark-xgboost-training-gzkm9'
+ value: 'xgboost-spark-xgboost-training-gzkm9',
},
{
name: 'train',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/transform/train/part-*'
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/transform/train/part-*',
},
{
name: 'eval',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/transform/eval/part-*'
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/transform/eval/part-*',
},
{
name: 'target',
- value: 'news_label'
+ value: 'news_label',
},
{
name: 'analysis',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/analysis'
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/analysis',
},
{
name: 'package',
- // tslint:disable-next-line:max-line-length
- value: 'gs://ml-pipeline-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar'
+ // tslint:disable-next-line:max-line-length
+ value:
+ 'gs://ml-pipeline-playground/xgboost4j-example-0.8-SNAPSHOT-jar-with-dependencies.jar',
},
{
name: 'workers',
- value: '2'
+ value: '2',
},
{
name: 'rounds',
- value: '200'
+ value: '200',
},
{
name: 'conf',
- value: 'gs://ml-pipeline-playground/trainconfcla.json '
- }
- ]
+ value: 'gs://ml-pipeline-playground/trainconfcla.json ',
+ },
+ ],
},
outputs: {
- artifacts: [{
- name: 'mlpipeline-ui-metadata',
- s3: {
- bucket: 'somebucket',
- key: 'model',
+ artifacts: [
+ {
+ name: 'mlpipeline-ui-metadata',
+ s3: {
+ bucket: 'somebucket',
+ key: 'model',
+ },
},
- }],
+ ],
},
- children: [
- 'xgboost-training-gzkm9-4204798981'
- ]
+ children: ['xgboost-training-gzkm9-4204798981'],
},
'xgboost-training-gzkm9-2411879589': {
id: 'xgboost-training-gzkm9-2411879589',
@@ -998,25 +976,23 @@ export default {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'name',
- value: 'xgboost-spark-xgboost-training-gzkm9'
+ value: 'xgboost-spark-xgboost-training-gzkm9',
},
{
name: 'staging',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/staging'
- }
- ]
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/staging',
+ },
+ ],
},
- children: [
- 'xgboost-training-gzkm9-916047540'
- ]
+ children: ['xgboost-training-gzkm9-916047540'],
},
'xgboost-training-gzkm9-2457131397': {
id: 'xgboost-training-gzkm9-2457131397',
@@ -1032,46 +1008,46 @@ export default {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'cluster',
- value: 'xgboost-spark-xgboost-training-gzkm9'
+ value: 'xgboost-spark-xgboost-training-gzkm9',
},
{
name: 'train',
- value: 'gs://ml-pipeline-playground/newsgroup/train.csv'
+ value: 'gs://ml-pipeline-playground/newsgroup/train.csv',
},
{
name: 'eval',
- value: 'gs://ml-pipeline-playground/newsgroup/eval.csv'
+ value: 'gs://ml-pipeline-playground/newsgroup/eval.csv',
},
{
name: 'target',
- value: 'news_label'
+ value: 'news_label',
},
{
name: 'analysis',
- value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/analysis'
- }
- ]
+ value: 'gs://sample-xgbbost-cm-output/xgboost-training-gzkm9/analysis',
+ },
+ ],
},
outputs: {
- artifacts: [{
- name: 'mlpipeline-ui-metadata',
- s3: {
- bucket: 'somebucket',
- key: 'transform',
+ artifacts: [
+ {
+ name: 'mlpipeline-ui-metadata',
+ s3: {
+ bucket: 'somebucket',
+ key: 'transform',
+ },
},
- }],
+ ],
},
- children: [
- 'xgboost-training-gzkm9-982760658'
- ]
+ children: ['xgboost-training-gzkm9-982760658'],
},
'xgboost-training-gzkm9-3439262870': {
id: 'xgboost-training-gzkm9-3439262870',
@@ -1082,9 +1058,7 @@ export default {
boundaryID: 'xgboost-training-gzkm9-1253553084',
startedAt: '2018-04-17T00:10:06Z',
finishedAt: '2018-04-17T00:12:01Z',
- children: [
- 'xgboost-training-gzkm9-3721733163'
- ]
+ children: ['xgboost-training-gzkm9-3721733163'],
},
'xgboost-training-gzkm9-3636935406': {
id: 'xgboost-training-gzkm9-3636935406',
@@ -1100,38 +1074,38 @@ export default {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'cluster',
- value: 'xgboost-spark-xgboost-training-gzkm9'
+ value: 'xgboost-spark-xgboost-training-gzkm9',
},
{
name: 'train',
- value: 'gs://ml-pipeline-playground/newsgroup/train.csv'
+ value: 'gs://ml-pipeline-playground/newsgroup/train.csv',
},
{
name: 'schema',
- value: 'gs://ml-pipeline-playground/newsgroup/schema.json'
- }
- ]
+ value: 'gs://ml-pipeline-playground/newsgroup/schema.json',
+ },
+ ],
},
outputs: {
- artifacts: [{
- name: 'mlpipeline-ui-metadata',
- s3: {
- bucket: 'somebucket',
- key: 'analysis',
+ artifacts: [
+ {
+ name: 'mlpipeline-ui-metadata',
+ s3: {
+ bucket: 'somebucket',
+ key: 'analysis',
+ },
},
- }],
+ ],
},
- children: [
- 'xgboost-training-gzkm9-915503087'
- ]
+ children: ['xgboost-training-gzkm9-915503087'],
},
'xgboost-training-gzkm9-3721733163': {
id: 'xgboost-training-gzkm9-3721733163',
@@ -1147,18 +1121,18 @@ export default {
parameters: [
{
name: 'project',
- value: 'ml-pipeline'
+ value: 'ml-pipeline',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'name',
- value: 'xgboost-spark-xgboost-training-gzkm9'
- }
- ]
- }
+ value: 'xgboost-spark-xgboost-training-gzkm9',
+ },
+ ],
+ },
},
'xgboost-training-gzkm9-4204210601': {
id: 'xgboost-training-gzkm9-4204210601',
@@ -1169,9 +1143,7 @@ export default {
boundaryID: 'xgboost-training-gzkm9',
startedAt: '2018-04-16T23:37:48Z',
finishedAt: '2018-04-16T23:39:56Z',
- children: [
- 'xgboost-training-gzkm9-2411879589'
- ]
+ children: ['xgboost-training-gzkm9-2411879589'],
},
'xgboost-training-gzkm9-4204798981': {
id: 'xgboost-training-gzkm9-4204798981',
@@ -1182,9 +1154,7 @@ export default {
boundaryID: 'xgboost-training-gzkm9',
startedAt: '2018-04-17T00:08:06Z',
finishedAt: '2018-04-17T00:08:59Z',
- children: [
- 'xgboost-training-gzkm9-1761585008'
- ]
+ children: ['xgboost-training-gzkm9-1761585008'],
},
'xgboost-training-gzkm9-915503087': {
id: 'xgboost-training-gzkm9-915503087',
@@ -1195,9 +1165,7 @@ export default {
boundaryID: 'xgboost-training-gzkm9',
startedAt: '2018-04-16T23:41:24Z',
finishedAt: '2018-04-16T23:49:28Z',
- children: [
- 'xgboost-training-gzkm9-2457131397'
- ]
+ children: ['xgboost-training-gzkm9-2457131397'],
},
'xgboost-training-gzkm9-916047540': {
id: 'xgboost-training-gzkm9-916047540',
@@ -1208,9 +1176,7 @@ export default {
boundaryID: 'xgboost-training-gzkm9',
startedAt: '2018-04-16T23:39:56Z',
finishedAt: '2018-04-16T23:41:24Z',
- children: [
- 'xgboost-training-gzkm9-3636935406'
- ]
+ children: ['xgboost-training-gzkm9-3636935406'],
},
'xgboost-training-gzkm9-916635920': {
id: 'xgboost-training-gzkm9-916635920',
@@ -1221,9 +1187,7 @@ export default {
boundaryID: 'xgboost-training-gzkm9',
startedAt: '2018-04-17T00:08:59Z',
finishedAt: '2018-04-17T00:10:06Z',
- children: [
- 'xgboost-training-gzkm9-2203328319'
- ]
+ children: ['xgboost-training-gzkm9-2203328319'],
},
'xgboost-training-gzkm9-982760658': {
id: 'xgboost-training-gzkm9-982760658',
@@ -1234,10 +1198,8 @@ export default {
boundaryID: 'xgboost-training-gzkm9',
startedAt: '2018-04-16T23:49:28Z',
finishedAt: '2018-04-17T00:08:06Z',
- children: [
- 'xgboost-training-gzkm9-2365787662'
- ]
- }
- }
- }
+ children: ['xgboost-training-gzkm9-2365787662'],
+ },
+ },
+ },
};
diff --git a/frontend/mock-backend/mock-xgboost-small-runtime.ts b/frontend/mock-backend/mock-xgboost-small-runtime.ts
index d30db340133..650a70f0776 100644
--- a/frontend/mock-backend/mock-xgboost-small-runtime.ts
+++ b/frontend/mock-backend/mock-xgboost-small-runtime.ts
@@ -17,7 +17,8 @@ export default {
metadata: {
name: 'job-xgboosttrainingm7t2r-1-2537408167',
namespace: 'default',
- selfLink: '/apis/argoproj.io/v1alpha1/namespaces/default/workflows/job-xgboosttrainingm7t2r-1-2537408167',
+ selfLink:
+ '/apis/argoproj.io/v1alpha1/namespaces/default/workflows/job-xgboosttrainingm7t2r-1-2537408167',
uid: '3333210c-cdef-11e8-8c17-42010a8a0078',
resourceVersion: '24210',
creationTimestamp: '2018-10-12T07:19:47Z',
@@ -26,7 +27,7 @@ export default {
'scheduledworkflows.kubeflow.org/scheduledWorkflowName': 'job-xgboosttrainingm7t2r',
'scheduledworkflows.kubeflow.org/workflowEpoch': '1539328777',
'scheduledworkflows.kubeflow.org/workflowIndex': '1',
- 'workflows.argoproj.io/phase': 'Running'
+ 'workflows.argoproj.io/phase': 'Running',
},
ownerReferences: [
{
@@ -35,9 +36,9 @@ export default {
name: 'job-xgboosttrainingm7t2r',
uid: '2d3b0ed1-cdef-11e8-8c17-42010a8a0078',
controller: true,
- blockOwnerDeletion: true
- }
- ]
+ blockOwnerDeletion: true,
+ },
+ ],
},
spec: {
templates: [
@@ -46,25 +47,25 @@ export default {
inputs: {
parameters: [
{
- name: 'create-cluster-output'
+ name: 'create-cluster-output',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'project'
- }
- ]
+ name: 'project',
+ },
+ ],
},
outputs: {
parameters: [
{
name: 'analyze-output',
valueFrom: {
- path: '/output.txt'
- }
- }
- ]
+ path: '/output.txt',
+ },
+ },
+ ],
},
metadata: {},
container: {
@@ -82,22 +83,22 @@ export default {
'--train',
'gs://ml-pipeline-playground/sfpd/train.csv',
'--output',
- '{{inputs.parameters.output}}/{{workflow.name}}/analysis'
+ '{{inputs.parameters.output}}/{{workflow.name}}/analysis',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'confusion-matrix',
inputs: {
parameters: [
{
- name: 'output'
+ name: 'output',
},
{
- name: 'predict-output'
- }
- ]
+ name: 'predict-output',
+ },
+ ],
},
outputs: {},
metadata: {},
@@ -108,32 +109,32 @@ export default {
'--output',
'{{inputs.parameters.output}}/{{workflow.name}}/confusionmatrix',
'--predictions',
- '{{inputs.parameters.predict-output}}'
+ '{{inputs.parameters.predict-output}}',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'create-cluster',
inputs: {
parameters: [
{
- name: 'output'
+ name: 'output',
},
{
- name: 'project'
- }
- ]
+ name: 'project',
+ },
+ ],
},
outputs: {
parameters: [
{
name: 'create-cluster-output',
valueFrom: {
- path: '/output.txt'
- }
- }
- ]
+ path: '/output.txt',
+ },
+ },
+ ],
},
metadata: {},
container: {
@@ -147,19 +148,19 @@ export default {
'--name',
'xgb-{{workflow.name}}',
'--staging',
- '{{inputs.parameters.output}}'
+ '{{inputs.parameters.output}}',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'delete-cluster',
inputs: {
parameters: [
{
- name: 'project'
- }
- ]
+ name: 'project',
+ },
+ ],
},
outputs: {},
metadata: {},
@@ -172,22 +173,22 @@ export default {
'--region',
'us-central1',
'--name',
- 'xgb-{{workflow.name}}'
+ 'xgb-{{workflow.name}}',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'exit-handler-1',
inputs: {
parameters: [
{
- name: 'output'
+ name: 'output',
},
{
- name: 'project'
- }
- ]
+ name: 'project',
+ },
+ ],
},
outputs: {},
metadata: {},
@@ -200,21 +201,19 @@ export default {
parameters: [
{
name: 'create-cluster-output',
- value: '{{tasks.create-cluster.outputs.parameters.create-cluster-output}}'
+ value: '{{tasks.create-cluster.outputs.parameters.create-cluster-output}}',
},
{
name: 'output',
- value: '{{inputs.parameters.output}}'
+ value: '{{inputs.parameters.output}}',
},
{
name: 'project',
- value: '{{inputs.parameters.project}}'
- }
- ]
+ value: '{{inputs.parameters.project}}',
+ },
+ ],
},
- dependencies: [
- 'create-cluster'
- ]
+ dependencies: ['create-cluster'],
},
{
name: 'confusion-matrix',
@@ -223,17 +222,15 @@ export default {
parameters: [
{
name: 'output',
- value: '{{inputs.parameters.output}}'
+ value: '{{inputs.parameters.output}}',
},
{
name: 'predict-output',
- value: '{{tasks.predict.outputs.parameters.predict-output}}'
- }
- ]
+ value: '{{tasks.predict.outputs.parameters.predict-output}}',
+ },
+ ],
},
- dependencies: [
- 'predict'
- ]
+ dependencies: ['predict'],
},
{
name: 'create-cluster',
@@ -242,14 +239,14 @@ export default {
parameters: [
{
name: 'output',
- value: '{{inputs.parameters.output}}'
+ value: '{{inputs.parameters.output}}',
},
{
name: 'project',
- value: '{{inputs.parameters.project}}'
- }
- ]
- }
+ value: '{{inputs.parameters.project}}',
+ },
+ ],
+ },
},
{
name: 'predict',
@@ -258,36 +255,31 @@ export default {
parameters: [
{
name: 'analyze-output',
- value: '{{tasks.analyze.outputs.parameters.analyze-output}}'
+ value: '{{tasks.analyze.outputs.parameters.analyze-output}}',
},
{
name: 'create-cluster-output',
- value: '{{tasks.create-cluster.outputs.parameters.create-cluster-output}}'
+ value: '{{tasks.create-cluster.outputs.parameters.create-cluster-output}}',
},
{
name: 'output',
- value: '{{inputs.parameters.output}}'
+ value: '{{inputs.parameters.output}}',
},
{
name: 'project',
- value: '{{inputs.parameters.project}}'
+ value: '{{inputs.parameters.project}}',
},
{
name: 'train-output',
- value: '{{tasks.train.outputs.parameters.train-output}}'
+ value: '{{tasks.train.outputs.parameters.train-output}}',
},
{
name: 'transform-eval',
- value: '{{tasks.transform.outputs.parameters.transform-eval}}'
- }
- ]
+ value: '{{tasks.transform.outputs.parameters.transform-eval}}',
+ },
+ ],
},
- dependencies: [
- 'analyze',
- 'create-cluster',
- 'train',
- 'transform'
- ]
+ dependencies: ['analyze', 'create-cluster', 'train', 'transform'],
},
{
name: 'roc',
@@ -296,17 +288,15 @@ export default {
parameters: [
{
name: 'output',
- value: '{{inputs.parameters.output}}'
+ value: '{{inputs.parameters.output}}',
},
{
name: 'predict-output',
- value: '{{tasks.predict.outputs.parameters.predict-output}}'
- }
- ]
+ value: '{{tasks.predict.outputs.parameters.predict-output}}',
+ },
+ ],
},
- dependencies: [
- 'predict'
- ]
+ dependencies: ['predict'],
},
{
name: 'train',
@@ -315,35 +305,31 @@ export default {
parameters: [
{
name: 'analyze-output',
- value: '{{tasks.analyze.outputs.parameters.analyze-output}}'
+ value: '{{tasks.analyze.outputs.parameters.analyze-output}}',
},
{
name: 'create-cluster-output',
- value: '{{tasks.create-cluster.outputs.parameters.create-cluster-output}}'
+ value: '{{tasks.create-cluster.outputs.parameters.create-cluster-output}}',
},
{
name: 'output',
- value: '{{inputs.parameters.output}}'
+ value: '{{inputs.parameters.output}}',
},
{
name: 'project',
- value: '{{inputs.parameters.project}}'
+ value: '{{inputs.parameters.project}}',
},
{
name: 'transform-eval',
- value: '{{tasks.transform.outputs.parameters.transform-eval}}'
+ value: '{{tasks.transform.outputs.parameters.transform-eval}}',
},
{
name: 'transform-train',
- value: '{{tasks.transform.outputs.parameters.transform-train}}'
- }
- ]
+ value: '{{tasks.transform.outputs.parameters.transform-train}}',
+ },
+ ],
},
- dependencies: [
- 'analyze',
- 'create-cluster',
- 'transform'
- ]
+ dependencies: ['analyze', 'create-cluster', 'transform'],
},
{
name: 'transform',
@@ -352,63 +338,60 @@ export default {
parameters: [
{
name: 'analyze-output',
- value: '{{tasks.analyze.outputs.parameters.analyze-output}}'
+ value: '{{tasks.analyze.outputs.parameters.analyze-output}}',
},
{
name: 'create-cluster-output',
- value: '{{tasks.create-cluster.outputs.parameters.create-cluster-output}}'
+ value: '{{tasks.create-cluster.outputs.parameters.create-cluster-output}}',
},
{
name: 'output',
- value: '{{inputs.parameters.output}}'
+ value: '{{inputs.parameters.output}}',
},
{
name: 'project',
- value: '{{inputs.parameters.project}}'
- }
- ]
+ value: '{{inputs.parameters.project}}',
+ },
+ ],
},
- dependencies: [
- 'analyze',
- 'create-cluster'
- ]
- }
- ]
- }
+ dependencies: ['analyze', 'create-cluster'],
+ },
+ ],
+ },
},
{
name: 'predict',
inputs: {
parameters: [
{
- name: 'analyze-output'
+ name: 'analyze-output',
},
{
- name: 'create-cluster-output'
+ name: 'create-cluster-output',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'project'
+ name: 'project',
},
{
- name: 'train-output'
+ name: 'train-output',
},
{
- name: 'transform-eval'
- }
- ]
+ name: 'transform-eval',
+ },
+ ],
},
outputs: {
parameters: [
{
name: 'predict-output',
valueFrom: {
- path: '/output.txt'
- }
- }
- ]
+ path: '/output.txt',
+ },
+ },
+ ],
},
metadata: {},
container: {
@@ -432,22 +415,22 @@ export default {
'--model',
'{{inputs.parameters.train-output}}',
'--output',
- '{{inputs.parameters.output}}/{{workflow.name}}/predict'
+ '{{inputs.parameters.output}}/{{workflow.name}}/predict',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'roc',
inputs: {
parameters: [
{
- name: 'output'
+ name: 'output',
},
{
- name: 'predict-output'
- }
- ]
+ name: 'predict-output',
+ },
+ ],
},
outputs: {},
metadata: {},
@@ -460,44 +443,44 @@ export default {
'--predictions',
'{{inputs.parameters.predict-output}}',
'--trueclass',
- 'ACTION'
+ 'ACTION',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'train',
inputs: {
parameters: [
{
- name: 'analyze-output'
+ name: 'analyze-output',
},
{
- name: 'create-cluster-output'
+ name: 'create-cluster-output',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'project'
+ name: 'project',
},
{
- name: 'transform-eval'
+ name: 'transform-eval',
},
{
- name: 'transform-train'
- }
- ]
+ name: 'transform-train',
+ },
+ ],
},
outputs: {
parameters: [
{
name: 'train-output',
valueFrom: {
- path: '/output.txt'
- }
- }
- ]
+ path: '/output.txt',
+ },
+ },
+ ],
},
metadata: {},
container: {
@@ -527,44 +510,44 @@ export default {
'--conf',
'gs://ml-pipeline-playground/trainconfcla.json',
'--output',
- '{{inputs.parameters.output}}/{{workflow.name}}/model'
+ '{{inputs.parameters.output}}/{{workflow.name}}/model',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'transform',
inputs: {
parameters: [
{
- name: 'analyze-output'
+ name: 'analyze-output',
},
{
- name: 'create-cluster-output'
+ name: 'create-cluster-output',
},
{
- name: 'output'
+ name: 'output',
},
{
- name: 'project'
- }
- ]
+ name: 'project',
+ },
+ ],
},
outputs: {
parameters: [
{
name: 'transform-eval',
valueFrom: {
- path: '/output_eval.txt'
- }
+ path: '/output_eval.txt',
+ },
},
{
name: 'transform-train',
valueFrom: {
- path: '/output_train.txt'
- }
- }
- ]
+ path: '/output_train.txt',
+ },
+ },
+ ],
},
metadata: {},
container: {
@@ -586,22 +569,22 @@ export default {
'--target',
'resolution',
'--output',
- '{{inputs.parameters.output}}/{{workflow.name}}/transform'
+ '{{inputs.parameters.output}}/{{workflow.name}}/transform',
],
- resources: {}
- }
+ resources: {},
+ },
},
{
name: 'xgboosttrainer',
inputs: {
parameters: [
{
- name: 'output'
+ name: 'output',
},
{
- name: 'project'
- }
- ]
+ name: 'project',
+ },
+ ],
},
outputs: {},
metadata: {},
@@ -614,65 +597,65 @@ export default {
parameters: [
{
name: 'output',
- value: '{{inputs.parameters.output}}'
+ value: '{{inputs.parameters.output}}',
},
{
name: 'project',
- value: '{{inputs.parameters.project}}'
- }
- ]
- }
- }
- ]
- }
- }
+ value: '{{inputs.parameters.project}}',
+ },
+ ],
+ },
+ },
+ ],
+ },
+ },
],
entrypoint: 'xgboosttrainer',
arguments: {
parameters: [
{
name: 'output',
- value: 'gs://yelsayed-2/xgboost'
+ value: 'gs://yelsayed-2/xgboost',
},
{
name: 'project',
- value: 'yelsayed-2'
+ value: 'yelsayed-2',
},
{
name: 'region',
- value: 'us-central1'
+ value: 'us-central1',
},
{
name: 'train-data',
- value: 'gs://ml-pipeline-playground/sfpd/train.csv'
+ value: 'gs://ml-pipeline-playground/sfpd/train.csv',
},
{
name: 'eval-data',
- value: 'gs://ml-pipeline-playground/sfpd/eval.csv'
+ value: 'gs://ml-pipeline-playground/sfpd/eval.csv',
},
{
name: 'schema',
- value: 'gs://ml-pipeline-playground/sfpd/schema.json'
+ value: 'gs://ml-pipeline-playground/sfpd/schema.json',
},
{
name: 'target',
- value: 'resolution'
+ value: 'resolution',
},
{
name: 'rounds',
- value: '200'
+ value: '200',
},
{
name: 'workers',
- value: '2'
+ value: '2',
},
{
name: 'true-label',
- value: 'ACTION'
- }
- ]
+ value: 'ACTION',
+ },
+ ],
},
- onExit: 'delete-cluster'
+ onExit: 'delete-cluster',
},
status: {
phase: 'Running',
@@ -692,17 +675,15 @@ export default {
parameters: [
{
name: 'output',
- value: 'gs://yelsayed-2/xgboost'
+ value: 'gs://yelsayed-2/xgboost',
},
{
name: 'project',
- value: 'yelsayed-2'
- }
- ]
+ value: 'yelsayed-2',
+ },
+ ],
},
- children: [
- 'job-xgboosttrainingm7t2r-1-2537408167-3348277322'
- ]
+ children: ['job-xgboosttrainingm7t2r-1-2537408167-3348277322'],
},
'job-xgboosttrainingm7t2r-1-2537408167-294182655': {
id: 'job-xgboosttrainingm7t2r-1-2537408167-294182655',
@@ -712,21 +693,22 @@ export default {
templateName: 'create-cluster',
phase: 'Pending',
boundaryID: 'job-xgboosttrainingm7t2r-1-2537408167-3348277322',
- message: 'ImagePullBackOff: Back-off pulling image "gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster"',
+ message:
+ 'ImagePullBackOff: Back-off pulling image "gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster"',
startedAt: '2018-10-12T07:19:47Z',
finishedAt: null,
inputs: {
parameters: [
{
name: 'output',
- value: 'gs://yelsayed-2/xgboost'
+ value: 'gs://yelsayed-2/xgboost',
},
{
name: 'project',
- value: 'yelsayed-2'
- }
- ]
- }
+ value: 'yelsayed-2',
+ },
+ ],
+ },
},
'job-xgboosttrainingm7t2r-1-2537408167-3348277322': {
id: 'job-xgboosttrainingm7t2r-1-2537408167-3348277322',
@@ -742,18 +724,16 @@ export default {
parameters: [
{
name: 'output',
- value: 'gs://yelsayed-2/xgboost'
+ value: 'gs://yelsayed-2/xgboost',
},
{
name: 'project',
- value: 'yelsayed-2'
- }
- ]
- },
- children: [
- 'job-xgboosttrainingm7t2r-1-2537408167-294182655'
- ]
- }
- }
- }
+ value: 'yelsayed-2',
+ },
+ ],
+ },
+ children: ['job-xgboosttrainingm7t2r-1-2537408167-294182655'],
+ },
+ },
+ },
};
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index c3385760c2b..e83dab1b174 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -63,6 +63,25 @@
"regenerator-runtime": "^0.12.0"
}
},
+ "@babel/types": {
+ "version": "7.7.4",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.7.4.tgz",
+ "integrity": "sha512-cz5Ji23KCi4T+YIE/BolWosrJuSmoZeN1EFnRtBwF+KKLi8GG/Z2c2hOJJeCXPk4mwk4QFvTmwIodJowXgttRA==",
+ "dev": true,
+ "requires": {
+ "esutils": "^2.0.2",
+ "lodash": "^4.17.13",
+ "to-fast-properties": "^2.0.0"
+ },
+ "dependencies": {
+ "to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
+ "dev": true
+ }
+ }
+ },
"@google-cloud/common": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/@google-cloud/common/-/common-2.2.3.tgz",
@@ -283,6 +302,83 @@
}
}
},
+ "@jest/console": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/@jest/console/-/console-24.9.0.tgz",
+ "integrity": "sha512-Zuj6b8TnKXi3q4ymac8EQfc3ea/uhLeCGThFqXeC8H9/raaH8ARPUTdId+XyGd03Z4In0/VjD2OYFcBF09fNLQ==",
+ "dev": true,
+ "requires": {
+ "@jest/source-map": "^24.9.0",
+ "chalk": "^2.0.1",
+ "slash": "^2.0.0"
+ },
+ "dependencies": {
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "slash": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
+ "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==",
+ "dev": true
+ }
+ }
+ },
+ "@jest/source-map": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-24.9.0.tgz",
+ "integrity": "sha512-/Xw7xGlsZb4MJzNDgB7PW5crou5JqWiBQaz6xyPd3ArOg2nfn/PunV8+olXbbEZzNl591o5rWKE9BRDaFAuIBg==",
+ "dev": true,
+ "requires": {
+ "callsites": "^3.0.0",
+ "graceful-fs": "^4.1.15",
+ "source-map": "^0.6.0"
+ },
+ "dependencies": {
+ "callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "dev": true
+ }
+ }
+ },
+ "@jest/test-result": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-24.9.0.tgz",
+ "integrity": "sha512-XEFrHbBonBJ8dGp2JmF8kP/nQI/ImPpygKHwQ/SY+es59Z3L5PI4Qb9TQQMAEeYsThG1xF0k6tmG0tIKATNiiA==",
+ "dev": true,
+ "requires": {
+ "@jest/console": "^24.9.0",
+ "@jest/types": "^24.9.0",
+ "@types/istanbul-lib-coverage": "^2.0.0"
+ }
+ },
+ "@jest/types": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-24.9.0.tgz",
+ "integrity": "sha512-XKK7ze1apu5JWQ5eZjHITP66AX+QsLlbaJRBGYr8pNzwcAE2JVkwnf0yqjHTsDRcjR0mujy/NmZMXw5kl+kGBw==",
+ "dev": true,
+ "requires": {
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^1.1.1",
+ "@types/yargs": "^13.0.0"
+ }
+ },
"@material-ui/core": {
"version": "3.7.1",
"resolved": "https://registry.npmjs.org/@material-ui/core/-/core-3.7.1.tgz",
@@ -798,6 +894,31 @@
"@types/node": "*"
}
},
+ "@types/istanbul-lib-coverage": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.1.tgz",
+ "integrity": "sha512-hRJD2ahnnpLgsj6KWMYSrmXkM3rm2Dl1qkx6IOFD5FnuNPXJIG5L0dhgKXCYTRMGzU4n0wImQ/xfmRc4POUFlg==",
+ "dev": true
+ },
+ "@types/istanbul-lib-report": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-1.1.1.tgz",
+ "integrity": "sha512-3BUTyMzbZa2DtDI2BkERNC6jJw2Mr2Y0oGI7mRxYNBPxppbtEK1F66u3bKwU2g+wxwWI7PAoRpJnOY1grJqzHg==",
+ "dev": true,
+ "requires": {
+ "@types/istanbul-lib-coverage": "*"
+ }
+ },
+ "@types/istanbul-reports": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-1.1.1.tgz",
+ "integrity": "sha512-UpYjBi8xefVChsCoBpKShdxTllC9pwISirfoZsUa2AAdQg/Jd2KQGtSbw+ya7GPo7x/wAPlH6JBhKhAsXUEZNA==",
+ "dev": true,
+ "requires": {
+ "@types/istanbul-lib-coverage": "*",
+ "@types/istanbul-lib-report": "*"
+ }
+ },
"@types/jest": {
"version": "23.3.11",
"resolved": "https://registry.npmjs.org/@types/jest/-/jest-23.3.11.tgz",
@@ -855,9 +976,9 @@
"dev": true
},
"@types/node": {
- "version": "10.12.18",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-10.12.18.tgz",
- "integrity": "sha512-fh+pAqt4xRzPfqA6eh3Z2y6fyZavRIumvjhaCL753+TVkGKGhpPeyrJG2JftD0T9q4GF00KjefsQ+PQNDdWQaQ=="
+ "version": "10.17.11",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.11.tgz",
+ "integrity": "sha512-dNd2pp8qTzzNLAs3O8nH3iU9DG9866KHq9L3ISPB7DOGERZN81nW/5/g/KzMJpCU8jrbCiMRBzV9/sCEdRosig=="
},
"@types/prop-types": {
"version": "15.5.8",
@@ -952,6 +1073,12 @@
"@types/mime": "*"
}
},
+ "@types/stack-utils": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-1.0.1.tgz",
+ "integrity": "sha512-l42BggppR6zLmpfU6fq9HEa2oGPEI8yrSPL3GITjfRInppYFahObbIQOQK3UGxEnyQpltZLaPe75046NOZQikw==",
+ "dev": true
+ },
"@types/strip-bom": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@types/strip-bom/-/strip-bom-3.0.0.tgz",
@@ -964,6 +1091,21 @@
"integrity": "sha512-7NQmHra/JILCd1QqpSzl8+mJRc8ZHz3uDm8YV1Ks9IhK0epEiTw8aIErbvH9PI+6XbqhyIQy3462nEsn7UVzjQ==",
"dev": true
},
+ "@types/yargs": {
+ "version": "13.0.3",
+ "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-13.0.3.tgz",
+ "integrity": "sha512-K8/LfZq2duW33XW/tFwEAfnZlqIfVsoyRB3kfXdPXYhl0nfM8mmh7GS0jg7WrX2Dgq/0Ha/pR1PaR+BvmWwjiQ==",
+ "dev": true,
+ "requires": {
+ "@types/yargs-parser": "*"
+ }
+ },
+ "@types/yargs-parser": {
+ "version": "13.1.0",
+ "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-13.1.0.tgz",
+ "integrity": "sha512-gCubfBUZ6KxzoibJ+SCUc/57Ms1jz5NjHe4+dI2krNmU5zCPAphyLJYyTOg06ueIyfj+SaCUqmzun7ImlxDcKg==",
+ "dev": true
+ },
"abab": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/abab/-/abab-2.0.0.tgz",
@@ -4555,6 +4697,12 @@
"resolved": "https://registry.npmjs.org/diff-match-patch/-/diff-match-patch-1.0.4.tgz",
"integrity": "sha512-Uv3SW8bmH9nAtHKaKSanOQmj2DnlH65fUpcrMdfdaOxUG02QQ4YGZ8AE7kKOMisF7UqvOlGKVYWRvezdncW9lg=="
},
+ "diff-sequences": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-24.9.0.tgz",
+ "integrity": "sha512-Dj6Wk3tWyTE+Fo1rW8v0Xhwk80um6yFYKbuAxc9c3EZxIHFDYwbi34Uk42u1CdnIiVorvt4RmlSDjIPyzGC2ew==",
+ "dev": true
+ },
"diffie-hellman": {
"version": "5.0.3",
"resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz",
@@ -5862,7 +6010,8 @@
},
"ansi-regex": {
"version": "2.1.1",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"aproba": {
"version": "1.2.0",
@@ -5880,11 +6029,13 @@
},
"balanced-match": {
"version": "1.0.0",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"brace-expansion": {
"version": "1.1.11",
"bundled": true,
+ "optional": true,
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
@@ -5897,15 +6048,18 @@
},
"code-point-at": {
"version": "1.1.0",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"concat-map": {
"version": "0.0.1",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"console-control-strings": {
"version": "1.1.0",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"core-util-is": {
"version": "1.0.2",
@@ -6008,7 +6162,8 @@
},
"inherits": {
"version": "2.0.3",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"ini": {
"version": "1.3.5",
@@ -6018,6 +6173,7 @@
"is-fullwidth-code-point": {
"version": "1.0.0",
"bundled": true,
+ "optional": true,
"requires": {
"number-is-nan": "^1.0.0"
}
@@ -6030,17 +6186,20 @@
"minimatch": {
"version": "3.0.4",
"bundled": true,
+ "optional": true,
"requires": {
"brace-expansion": "^1.1.7"
}
},
"minimist": {
"version": "0.0.8",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"minipass": {
"version": "2.3.5",
"bundled": true,
+ "optional": true,
"requires": {
"safe-buffer": "^5.1.2",
"yallist": "^3.0.0"
@@ -6057,6 +6216,7 @@
"mkdirp": {
"version": "0.5.1",
"bundled": true,
+ "optional": true,
"requires": {
"minimist": "0.0.8"
}
@@ -6129,7 +6289,8 @@
},
"number-is-nan": {
"version": "1.0.1",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"object-assign": {
"version": "4.1.1",
@@ -6139,6 +6300,7 @@
"once": {
"version": "1.4.0",
"bundled": true,
+ "optional": true,
"requires": {
"wrappy": "1"
}
@@ -6214,7 +6376,8 @@
},
"safe-buffer": {
"version": "5.1.2",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"safer-buffer": {
"version": "2.1.2",
@@ -6244,6 +6407,7 @@
"string-width": {
"version": "1.0.2",
"bundled": true,
+ "optional": true,
"requires": {
"code-point-at": "^1.0.0",
"is-fullwidth-code-point": "^1.0.0",
@@ -6261,6 +6425,7 @@
"strip-ansi": {
"version": "3.0.1",
"bundled": true,
+ "optional": true,
"requires": {
"ansi-regex": "^2.0.0"
}
@@ -6299,11 +6464,13 @@
},
"wrappy": {
"version": "1.0.2",
- "bundled": true
+ "bundled": true,
+ "optional": true
},
"yallist": {
"version": "3.0.3",
- "bundled": true
+ "bundled": true,
+ "optional": true
}
}
},
@@ -7300,9 +7467,9 @@
"integrity": "sha1-/Xqtcmvxpf0W38KbL3pmAdJxOcQ="
},
"handlebars": {
- "version": "4.4.3",
- "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.4.3.tgz",
- "integrity": "sha512-B0W4A2U1ww3q7VVthTKfh+epHx+q4mCt6iK+zEAzbMBpWQAwxCeKxEGpj/1oQTpzPXDNSOG7hmG14TsISH50yw==",
+ "version": "4.5.3",
+ "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.5.3.tgz",
+ "integrity": "sha512-3yPecJoJHK/4c6aZhSvxOyG4vJKDshV36VHp0iVCDVh7o9w2vwi3NSnL2MMPj3YdduqaBcu7cGbggJQM0br9xA==",
"requires": {
"neo-async": "^2.6.0",
"optimist": "^0.6.1",
@@ -7747,13 +7914,24 @@
"integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM="
},
"https-proxy-agent": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-2.2.1.tgz",
- "integrity": "sha512-HPCTS1LW51bcyMYbxUIOO4HEOlQ1/1qRaFWcyxvwaqUS9TY88aoEuHUY33kuAh1YhVVaDQhLZsnPd+XNARWZlQ==",
+ "version": "2.2.4",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-2.2.4.tgz",
+ "integrity": "sha512-OmvfoQ53WLjtA9HeYP9RNrWMJzzAz1JGaSFr1nijg0PVR1JaD/xbJq1mdEIIlxGpXp9eSe/O2LgU9DJmTPd0Eg==",
"dev": true,
"requires": {
- "agent-base": "^4.1.0",
+ "agent-base": "^4.3.0",
"debug": "^3.1.0"
+ },
+ "dependencies": {
+ "agent-base": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-4.3.0.tgz",
+ "integrity": "sha512-salcGninV0nPrwpGNn4VTXBb1SOuXQBiqbrNXoeizJsHrsL6ERFM2Ne3JUSBWRE6aeNJI2ROP/WEEIDUiDe3cg==",
+ "dev": true,
+ "requires": {
+ "es6-promisify": "^5.0.0"
+ }
+ }
}
},
"hyphenate-style-name": {
@@ -8941,6 +9119,12 @@
"resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-22.4.3.tgz",
"integrity": "sha512-+4R6mH5M1G4NK16CKg9N1DtCaFmuxhcIqF4lQK/Q1CIotqMs/XBemfpDPeVZBFow6iyUNu6EBT9ugdNOTT5o5Q=="
},
+ "jest-pnp-resolver": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.1.tgz",
+ "integrity": "sha512-pgFw2tm54fzgYvc/OHrnysABEObZCUNFnhjoRjaVOCN8NYc032/gVjPaHD4Aq6ApkSieWtfKAFQtmDKAmhupnQ==",
+ "dev": true
+ },
"jest-regex-util": {
"version": "22.4.3",
"resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-22.4.3.tgz",
@@ -13841,6 +14025,185 @@
}
}
},
+ "snapshot-diff": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/snapshot-diff/-/snapshot-diff-0.6.1.tgz",
+ "integrity": "sha512-wWt3x4fb7FJIcV05Ng9NceVSTvQYE493sIqebzUoQbQlRG6rIR03KaRt8o/7W7znaYjUbP0eOq1iK+DfpZXaeQ==",
+ "dev": true,
+ "requires": {
+ "jest-diff": "^24.0.0",
+ "jest-snapshot": "^24.0.0",
+ "pretty-format": "^24.0.0",
+ "strip-ansi": "^5.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "dev": true
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "expect": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/expect/-/expect-24.9.0.tgz",
+ "integrity": "sha512-wvVAx8XIol3Z5m9zvZXiyZOQ+sRJqNTIm6sGjdWlaZIeupQGO3WbYI+15D/AmEwZywL6wtJkbAbJtzkOfBuR0Q==",
+ "dev": true,
+ "requires": {
+ "@jest/types": "^24.9.0",
+ "ansi-styles": "^3.2.0",
+ "jest-get-type": "^24.9.0",
+ "jest-matcher-utils": "^24.9.0",
+ "jest-message-util": "^24.9.0",
+ "jest-regex-util": "^24.9.0"
+ }
+ },
+ "jest-diff": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-24.9.0.tgz",
+ "integrity": "sha512-qMfrTs8AdJE2iqrTp0hzh7kTd2PQWrsFyj9tORoKmu32xjPjeE4NyjVRDz8ybYwqS2ik8N4hsIpiVTyFeo2lBQ==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.0.1",
+ "diff-sequences": "^24.9.0",
+ "jest-get-type": "^24.9.0",
+ "pretty-format": "^24.9.0"
+ }
+ },
+ "jest-get-type": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-24.9.0.tgz",
+ "integrity": "sha512-lUseMzAley4LhIcpSP9Jf+fTrQ4a1yHQwLNeeVa2cEmbCGeoZAtYPOIv8JaxLD/sUpKxetKGP+gsHl8f8TSj8Q==",
+ "dev": true
+ },
+ "jest-matcher-utils": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-24.9.0.tgz",
+ "integrity": "sha512-OZz2IXsu6eaiMAwe67c1T+5tUAtQyQx27/EMEkbFAGiw52tB9em+uGbzpcgYVpA8wl0hlxKPZxrly4CXU/GjHA==",
+ "dev": true,
+ "requires": {
+ "chalk": "^2.0.1",
+ "jest-diff": "^24.9.0",
+ "jest-get-type": "^24.9.0",
+ "pretty-format": "^24.9.0"
+ }
+ },
+ "jest-message-util": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-24.9.0.tgz",
+ "integrity": "sha512-oCj8FiZ3U0hTP4aSui87P4L4jC37BtQwUMqk+zk/b11FR19BJDeZsZAvIHutWnmtw7r85UmR3CEWZ0HWU2mAlw==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.0.0",
+ "@jest/test-result": "^24.9.0",
+ "@jest/types": "^24.9.0",
+ "@types/stack-utils": "^1.0.1",
+ "chalk": "^2.0.1",
+ "micromatch": "^3.1.10",
+ "slash": "^2.0.0",
+ "stack-utils": "^1.0.1"
+ }
+ },
+ "jest-regex-util": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-24.9.0.tgz",
+ "integrity": "sha512-05Cmb6CuxaA+Ys6fjr3PhvV3bGQmO+2p2La4hFbU+W5uOc479f7FdLXUWXw4pYMAhhSZIuKHwSXSu6CsSBAXQA==",
+ "dev": true
+ },
+ "jest-resolve": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-24.9.0.tgz",
+ "integrity": "sha512-TaLeLVL1l08YFZAt3zaPtjiVvyy4oSA6CRe+0AFPPVX3Q/VI0giIWWoAvoS5L96vj9Dqxj4fB5p2qrHCmTU/MQ==",
+ "dev": true,
+ "requires": {
+ "@jest/types": "^24.9.0",
+ "browser-resolve": "^1.11.3",
+ "chalk": "^2.0.1",
+ "jest-pnp-resolver": "^1.2.1",
+ "realpath-native": "^1.1.0"
+ }
+ },
+ "jest-snapshot": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-24.9.0.tgz",
+ "integrity": "sha512-uI/rszGSs73xCM0l+up7O7a40o90cnrk429LOiK3aeTvfC0HHmldbd81/B7Ix81KSFe1lwkbl7GnBGG4UfuDew==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.0.0",
+ "@jest/types": "^24.9.0",
+ "chalk": "^2.0.1",
+ "expect": "^24.9.0",
+ "jest-diff": "^24.9.0",
+ "jest-get-type": "^24.9.0",
+ "jest-matcher-utils": "^24.9.0",
+ "jest-message-util": "^24.9.0",
+ "jest-resolve": "^24.9.0",
+ "mkdirp": "^0.5.1",
+ "natural-compare": "^1.4.0",
+ "pretty-format": "^24.9.0",
+ "semver": "^6.2.0"
+ }
+ },
+ "pretty-format": {
+ "version": "24.9.0",
+ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-24.9.0.tgz",
+ "integrity": "sha512-00ZMZUiHaJrNfk33guavqgvfJS30sLYf0f8+Srklv0AMPodGGHcoHgksZ3OThYnIvOd+8yMCn0YiEOogjlgsnA==",
+ "dev": true,
+ "requires": {
+ "@jest/types": "^24.9.0",
+ "ansi-regex": "^4.0.0",
+ "ansi-styles": "^3.2.0",
+ "react-is": "^16.8.4"
+ }
+ },
+ "react-is": {
+ "version": "16.12.0",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.12.0.tgz",
+ "integrity": "sha512-rPCkf/mWBtKc97aLL9/txD8DZdemK0vkA3JMLShjlJB3Pj3s+lpf1KaBzMfQrAmhMQB0n1cU/SUGgKKBCe837Q==",
+ "dev": true
+ },
+ "realpath-native": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/realpath-native/-/realpath-native-1.1.0.tgz",
+ "integrity": "sha512-wlgPA6cCIIg9gKz0fgAPjnzh4yR/LnXovwuo9hvyGvx3h8nX4+/iLZplfUWasXpqD8BdnGnP5njOFjkUwPzvjA==",
+ "dev": true,
+ "requires": {
+ "util.promisify": "^1.0.0"
+ }
+ },
+ "semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+ "dev": true
+ },
+ "slash": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
+ "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==",
+ "dev": true
+ },
+ "strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^4.1.0"
+ }
+ }
+ }
+ },
"sockjs": {
"version": "0.3.18",
"resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.18.tgz",
diff --git a/frontend/package.json b/frontend/package.json
index ea43f857c09..1a46d18ef5c 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -44,8 +44,8 @@
"build": "npm run lint && react-scripts-ts build",
"docker": "COMMIT_HASH=`git rev-parse HEAD`; docker build -q -t ml-pipelines-frontend:${COMMIT_HASH} --build-arg COMMIT_HASH=${COMMIT_HASH} --build-arg DATE=\"`date -u`\" -f Dockerfile ..",
"eject": "react-scripts-ts eject",
- "format": "prettier --write 'src/**/*.{ts,tsx}'",
- "format:check": "prettier --check 'src/**/*.{ts,tsx}' || node ./scripts/check-format-error-info.js",
+ "format": "prettier --write './**/*.{ts,tsx}'",
+ "format:check": "prettier --check './**/*.{ts,tsx}' || node ./scripts/check-format-error-info.js",
"java": "java -version",
"lint": "tslint -c ./tslint.prod.json -p .",
"mock:api": "ts-node-dev -O '{\"module\": \"commonjs\"}' mock-backend/mock-api-server.ts 3001",
@@ -55,7 +55,8 @@
"start:proxy-standalone-and-server": "./start-proxy-standalone-and-server.sh",
"start": "react-scripts-ts start",
"test": "react-scripts-ts test --env=jsdom",
- "test:coverage": "npm test -- --env=jsdom --coverage",
+ "test:server:coverage": "cd ./server && npm test -- --coverage && cd ..",
+ "test:coverage": "npm test -- --coverage && npm run test:server:coverage",
"test:ci": "npm run format:check && npm run lint && npm run test:coverage",
"test:ci:prow": "npm set unsafe-perm true && npm ci && npm run test:ci && ./scripts/report-coveralls.sh",
"vr-approve": "backstop approve",
@@ -74,7 +75,7 @@
"@types/js-yaml": "^3.11.2",
"@types/lodash": ">=4.14.117",
"@types/markdown-to-jsx": "^6.9.0",
- "@types/node": "^10.10.1",
+ "@types/node": "^10.17.11",
"@types/react": "^16.7.18",
"@types/react-dom": "^16.0.7",
"@types/react-router-dom": "^4.3.1",
@@ -88,6 +89,7 @@
"prettier": "1.18.2",
"react-router-test-context": "^0.1.0",
"react-test-renderer": "^16.5.2",
+ "snapshot-diff": "^0.6.1",
"swagger-ts-client": "^0.9.6",
"ts-node": "^7.0.1",
"ts-node-dev": "^1.0.0-pre.30",
@@ -99,6 +101,7 @@
"jest": {
"collectCoverageFrom": [
"src/**/*.{ts,tsx}",
+ "!src/**/*.d.ts",
"!src/apis/**/*.ts",
"!src/icons/*.tsx",
"!src/third_party/*",
diff --git a/frontend/public/index.html b/frontend/public/index.html
index 36ebdc45695..0ccbe720ba3 100644
--- a/frontend/public/index.html
+++ b/frontend/public/index.html
@@ -27,6 +27,7 @@
window.KFP_FLAGS={};
window.KFP_FLAGS.DEPLOYMENT=null;
+