add type checking sample to sample tests (#1129)
* add type checking sample to sample tests

* Add the test script exit code to the sample test result; update the check_notebook_results script to not validate the pipeline runs when the experiment arg is not provided

* fix typo
gaoning777 authored and k8s-ci-robot committed Apr 12, 2019
1 parent eb58f50 commit 06e544b
Showing 4 changed files with 75 additions and 31 deletions.
1 change: 1 addition & 0 deletions samples/notebooks/DSL Static Type Checking.ipynb
@@ -780,6 +780,7 @@
}
],
"metadata": {
"celltoolbar": "Tags",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
68 changes: 40 additions & 28 deletions test/sample-test/check_notebook_results.py
@@ -17,7 +17,12 @@
import utils

###### Input/Output Instruction ######
# input: experiment name, testname, and, namespace
# args:
# experiment: where the test run belongs, only necessary when a job is submitted.
# namespace: where the pipeline system is deployed.
# testname: test name in the junit xml
# result: name of the file that stores the test result
# exit_code: the exit code of the bash command that runs the test.

# Parsing the input arguments
def parse_arguments():
@@ -26,20 +26,23 @@ def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--experiment',
type=str,
required=True,
help='The experiment name')
parser.add_argument('--testname',
type=str,
required=True,
help="Test name")
parser.add_argument('--namespace',
type=str,
default='kubeflow',
help="namespace of the deployed pipeline system. Default: kubeflow")
parser.add_argument('--testname',
type=str,
required=True,
help="Test name")
parser.add_argument('--result',
type=str,
required=True,
help='The path of the test result that will be exported.')
parser.add_argument('--exit-code',
type=str,
required=True,
help='The exit code of the bash command that runs the test.')
args = parser.parse_args()
return args

@@ -48,33 +48,37 @@ def main():
test_cases = []
test_name = args.testname + ' Sample Test'

###### Initialization ######
host = 'ml-pipeline.%s.svc.cluster.local:8888' % args.namespace
client = Client(host=host)
###### Write the script exit code log ######
utils.add_junit_test(test_cases, 'test script execution', (args.exit_code == '0'), 'test script failure with exit code: ' + args.exit_code)

if args.experiment is not None:
###### Initialization ######
host = 'ml-pipeline.%s.svc.cluster.local:8888' % args.namespace
client = Client(host=host)

###### Get experiments ######
experiment_id = client.get_experiment(experiment_name=args.experiment).id
###### Get experiments ######
experiment_id = client.get_experiment(experiment_name=args.experiment).id

###### Get runs ######
list_runs_response = client.list_runs(page_size=1000, experiment_id=experiment_id)
###### Get runs ######
list_runs_response = client.list_runs(page_size=1000, experiment_id=experiment_id)

###### Check all runs ######
for run in list_runs_response.runs:
run_id = run.id
response = client.wait_for_run_completion(run_id, 1200)
succ = (response.run.status.lower()=='succeeded')
utils.add_junit_test(test_cases, 'job completion', succ, 'waiting for job completion failure')
###### Check all runs ######
for run in list_runs_response.runs:
run_id = run.id
response = client.wait_for_run_completion(run_id, 1200)
succ = (response.run.status.lower()=='succeeded')
utils.add_junit_test(test_cases, 'job completion', succ, 'waiting for job completion failure')

###### Output Argo Log for Debugging ######
workflow_json = client._get_workflow_json(run_id)
workflow_id = workflow_json['metadata']['name']
argo_log, _ = utils.run_bash_command('argo logs -n {} -w {}'.format(args.namespace, workflow_id))
print("=========Argo Workflow Log=========")
print(argo_log)
###### Output Argo Log for Debugging ######
workflow_json = client._get_workflow_json(run_id)
workflow_id = workflow_json['metadata']['name']
argo_log, _ = utils.run_bash_command('argo logs -n {} -w {}'.format(args.namespace, workflow_id))
print("=========Argo Workflow Log=========")
print(argo_log)

if not succ:
utils.write_junit_xml(test_name, args.result, test_cases)
exit(1)
if not succ:
utils.write_junit_xml(test_name, args.result, test_cases)
exit(1)

###### Write out the test result in junit xml ######
utils.write_junit_xml(test_name, args.result, test_cases)
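Note: check_notebook_results.py delegates result reporting to two helpers from test/sample-test/utils.py that are not part of this diff, utils.add_junit_test and utils.write_junit_xml. A minimal sketch of what such helpers could look like, assuming the junit_xml package; the names and signatures mirror the calls above, but the repository's actual implementation may differ.

# Illustrative sketch only, not the repository's utils.py; assumes the junit_xml package.
from junit_xml import TestCase, TestSuite

def add_junit_test(test_cases, name, succeeded, message=''):
    # Record one test case; attach the message as failure info when it did not succeed.
    case = TestCase(name)
    if not succeeded:
        case.add_failure_info(message)
    test_cases.append(case)

def write_junit_xml(suite_name, result_path, test_cases):
    # Serialize the collected cases into a JUnit XML report at result_path.
    suite = TestSuite(suite_name, test_cases)
    with open(result_path, 'w') as f:
        f.write(TestSuite.to_xml_string([suite]))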
22 changes: 20 additions & 2 deletions test/sample-test/run_test.sh
@@ -314,8 +314,9 @@ elif [ "$TEST_NAME" == "notebook-tfx" ]; then
jupyter nbconvert --to python notebook-tfx.ipynb
pip3 install tensorflow==1.8.0
ipython notebook-tfx.py
EXIT_CODE=$?
cd "${TEST_DIR}"
python3 check_notebook_results.py --experiment notebook-tfx-test --testname notebooktfx --result $SAMPLE_NOTEBOOK_TFX_TEST_RESULT --namespace ${NAMESPACE}
python3 check_notebook_results.py --experiment notebook-tfx-test --testname notebooktfx --result $SAMPLE_NOTEBOOK_TFX_TEST_RESULT --namespace ${NAMESPACE} --exit-code ${EXIT_CODE}

echo "Copy the test results to GCS ${RESULTS_GCS_DIR}/"
gsutil cp $SAMPLE_NOTEBOOK_TFX_TEST_RESULT ${RESULTS_GCS_DIR}/$SAMPLE_NOTEBOOK_TFX_TEST_RESULT
@@ -334,9 +335,26 @@ elif [ "$TEST_NAME" == "notebook-lightweight" ]; then
jupyter nbconvert --to python notebook-lightweight.ipynb
pip3 install tensorflow==1.8.0
ipython notebook-lightweight.py
EXIT_CODE=$?
cd "${TEST_DIR}"
python3 check_notebook_results.py --experiment notebook-lightweight --testname notebooklightweight --result $SAMPLE_NOTEBOOK_LIGHTWEIGHT_TEST_RESULT --namespace ${NAMESPACE}
python3 check_notebook_results.py --experiment notebook-lightweight --testname notebooklightweight --result $SAMPLE_NOTEBOOK_LIGHTWEIGHT_TEST_RESULT --namespace ${NAMESPACE} --exit-code ${EXIT_CODE}

echo "Copy the test results to GCS ${RESULTS_GCS_DIR}/"
gsutil cp $SAMPLE_NOTEBOOK_LIGHTWEIGHT_TEST_RESULT ${RESULTS_GCS_DIR}/$SAMPLE_NOTEBOOK_LIGHTWEIGHT_TEST_RESULT
elif [ "$TEST_NAME" == "notebook-typecheck" ]; then
SAMPLE_NOTEBOOK_TYPECHECK_TEST_RESULT=junit_SampleNotebookTypecheckOutput.xml
SAMPLE_NOTEBOOK_TYPECHECK_TEST_OUTPUT=${RESULTS_GCS_DIR}

cd ${BASE_DIR}/samples/notebooks
export LC_ALL=C.UTF-8
export LANG=C.UTF-8
papermill --prepare-only -p KFP_PACKAGE /tmp/kfp.tar.gz DSL\ Static\ Type\ Checking.ipynb notebook-typecheck.ipynb
jupyter nbconvert --to python notebook-typecheck.ipynb
ipython notebook-typecheck.py
EXIT_CODE=$?
cd "${TEST_DIR}"
python3 check_notebook_results.py --testname notebooktypecheck --result $SAMPLE_NOTEBOOK_TYPECHECK_TEST_RESULT --exit-code ${EXIT_CODE}

echo "Copy the test results to GCS ${RESULTS_GCS_DIR}/"
gsutil cp $SAMPLE_NOTEBOOK_TYPECHECK_TEST_RESULT ${RESULTS_GCS_DIR}/$SAMPLE_NOTEBOOK_TYPECHECK_TEST_RESULT
fi
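Note that the notebook-typecheck invocation above omits --experiment. Because the updated check_notebook_results.py no longer marks that flag as required, argparse leaves args.experiment as None and the script only records the test script exit code, skipping the pipeline-run validation. A small standalone illustration of that argparse behavior (the parser below is a simplified stand-in, not the script itself):

import argparse

parser = argparse.ArgumentParser()
# As in the updated script: --experiment has no required=True and no default.
parser.add_argument('--experiment', type=str, help='The experiment name')

args = parser.parse_args([])                               # no --experiment passed
print(args.experiment is None)                             # True -> run validation is skipped
args = parser.parse_args(['--experiment', 'notebook-tfx-test'])
print(args.experiment)                                     # 'notebook-tfx-test' -> runs are validated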
15 changes: 14 additions & 1 deletion test/sample_test_v2.yaml
@@ -120,7 +120,20 @@ spec:
value: "{{inputs.parameters.namespace}}"
- name: test-name
value: "notebook-lightweight"

- name: run-notebook-typecheck-tests
template: run-sample-tests
arguments:
parameters:
- name: test-results-gcs-dir
value: "{{inputs.parameters.test-results-gcs-dir}}"
- name: target-image-prefix
value: "{{inputs.parameters.target-image-prefix}}"
- name: sample-tests-image
value: "{{inputs.parameters.target-image-prefix}}{{inputs.parameters.sample-tests-image-suffix}}"
- name: namespace
value: "{{inputs.parameters.namespace}}"
- name: test-name
value: "notebook-typecheck"

# Build and push image
- name: build-image-by-dockerfile
