
Commit

Allow pytest to return success when no tests are collected for flaky test suite (DataDog#17990)

* Handle pytest's "no tests collected" exit code 5 for the flaky test suite (see the sketch after this list)
* Support passing additional -m arguments to pytest in e2e tests
* Avoid passing `all` to pytest (revert to initial state)
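
For context, pytest reserves exit code 5 for "no tests were collected". A minimal sketch of that convention, assuming pytest >= 5.0 (where pytest.ExitCode is public API):

import pytest

# pytest exits with code 5 when the collection phase finds no tests,
# e.g. when `-m flaky` matches nothing because no test in the suite
# carries @pytest.mark.flaky.
assert pytest.ExitCode.NO_TESTS_COLLECTED == 5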
vivek-datadog authored and ravindrasojitra-crest committed Aug 5, 2024
1 parent 10c06e7 commit 941a8c3
Showing 2 changed files with 108 additions and 7 deletions.
87 changes: 81 additions & 6 deletions .github/workflows/test-target.yml
@@ -212,23 +212,79 @@ jobs:
if: inputs.standard && !inputs.minimum-base-package
env:
DDEV_TEST_ENABLE_TRACING: "${{ inputs.repo == 'core' && '1' || '0' }}"
-run: ddev test --cov --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }}
+run: |
+  if [ "${{ inputs.pytest-args }}" = "-m flaky" ]; then
+    set +e # Disable immediate exit
+    ddev test --cov --junit ${{ inputs.target }} -- ${{ inputs.pytest-args }}
+    exit_code=$?
+    if [ $exit_code -eq 5 ]; then
+      # Flaky test count can be zero, this is done to avoid pipeline failure
+      echo "No tests were collected."
+      exit 0
+    else
+      exit $exit_code
+    fi
+  else
+    ddev test --cov --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }}
+  fi
- name: Run Unit & Integration tests with minimum version of base package
if: inputs.standard && inputs.minimum-base-package
-run: ddev test --compat --recreate --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }}
+run: |
+  if [ "${{ inputs.pytest-args }}" = "-m flaky" ]; then
+    set +e # Disable immediate exit
+    ddev test --compat --recreate --junit ${{ inputs.target }} -- ${{ inputs.pytest-args }}
+    exit_code=$?
+    if [ $exit_code -eq 5 ]; then
+      # Flaky test count can be zero, this is done to avoid pipeline failure
+      echo "No tests were collected."
+      exit 0
+    else
+      exit $exit_code
+    fi
+  else
+    ddev test --compat --recreate --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }}
+  fi
- name: Run E2E tests with latest base package
if: inputs.standard && inputs.repo == 'core' && !inputs.minimum-base-package
env:
DD_API_KEY: "${{ secrets.DD_API_KEY }}"
-run: ddev env test --base --new-env --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- all {0}', inputs.pytest-args) || '' }}
+run: |
+  if [ "${{ inputs.pytest-args }}" = "-m flaky" ]; then
+    set +e # Disable immediate exit
+    ddev env test --base --new-env --junit ${{ inputs.target }} -- ${{ inputs.pytest-args }}
+    exit_code=$?
+    if [ $exit_code -eq 5 ]; then
+      # Flaky test count can be zero, this is done to avoid pipeline failure
+      echo "No tests were collected."
+      exit 0
+    else
+      exit $exit_code
+    fi
+  else
+    ddev env test --base --new-env --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }}
+  fi
- name: Run E2E tests
if: inputs.standard && inputs.repo != 'core'
env:
DD_API_KEY: "${{ secrets.DD_API_KEY }}"
-run: ddev env test --new-env --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- all {0}', inputs.pytest-args) || '' }}
+run: |
+  if [ "${{ inputs.pytest-args }}" = "-m flaky" ]; then
+    set +e # Disable immediate exit
+    ddev env test --new-env --junit ${{ inputs.target }} -- ${{ inputs.pytest-args }}
+    exit_code=$?
+    if [ $exit_code -eq 5 ]; then
+      # Flaky test count can be zero, this is done to avoid pipeline failure
+      echo "No tests were collected."
+      exit 0
+    else
+      exit $exit_code
+    fi
+  else
+    ddev env test --new-env --junit ${{ inputs.target }} ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }}
+  fi
- name: Run benchmarks
if: inputs.benchmark
@@ -243,7 +299,21 @@ jobs:
env:
DD_API_KEY: "${{ secrets.DD_API_KEY }}"
DDEV_TEST_ENABLE_TRACING: "${{ inputs.repo == 'core' && '1' || '0' }}"
-run: ddev env test --base --new-env --junit ${{ inputs.target }}:latest ${{ inputs.pytest-args != '' && format('-- all {0}', inputs.pytest-args) || '' }}
+run: |
+  if [ "${{ inputs.pytest-args }}" = "-m flaky" ]; then
+    set +e # Disable immediate exit
+    ddev env test --base --new-env --junit ${{ inputs.target }}:latest -- ${{ inputs.pytest-args }}
+    exit_code=$?
+    if [ $exit_code -eq 5 ]; then
+      # Flaky test count can be zero, this is done to avoid pipeline failure
+      echo "No tests were collected."
+      exit 0
+    else
+      exit $exit_code
+    fi
+  else
+    ddev env test --base --new-env --junit ${{ inputs.target }}:latest ${{ inputs.pytest-args != '' && format('-- {0}', inputs.pytest-args) || '' }}
+  fi
- name: View trace log
if: inputs.repo == 'core' && always()
@@ -272,7 +342,12 @@ jobs:
path: "${{ env.TEST_RESULTS_BASE_DIR }}"

- name: Upload coverage data
-if: inputs.standard && !github.event.repository.private && always()
+if: >
+  inputs.standard &&
+  !github.event.repository.private &&
+  always() &&
+  inputs.pytest-args != '-m flaky'
+# Flaky tests will have low coverage, don't upload it to avoid pipeline failure
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
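The five workflow steps above share one pattern: run the suite, then map pytest's "no tests collected" exit code 5 to success when only flaky tests were requested. A minimal Python sketch of that pattern, with an illustrative command and target name (not taken from the workflow):

import subprocess
import sys

# Run the flaky suite; 'my_target' is a placeholder integration name.
result = subprocess.run(
    ['ddev', 'test', '--cov', '--junit', 'my_target', '--', '-m', 'flaky']
)

# Exit code 5 means pytest collected no tests. For a flaky-only run that
# is expected (the flaky test count can be zero), so report success
# instead of failing the pipeline; propagate every other code unchanged.
sys.exit(0 if result.returncode == 5 else result.returncode)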
28 changes: 27 additions & 1 deletion ddev/src/ddev/cli/test/__init__.py
@@ -199,7 +199,33 @@ def test(
base_command.append('--memray')

if e2e:
-    base_command.extend(('-m', 'e2e'))
+    # Convert pytest_args to a list if it's a tuple
+    pytest_args_list = list(pytest_args) if isinstance(pytest_args, tuple) else pytest_args
+
+    # Initialize a list to hold indices of '-m' options and their values to be removed
+    indices_to_remove = []
+    marker_values = []
+
+    # Iterate over pytest_args_list to find '-m' or '--markers' options and their values
+    for i, arg in enumerate(pytest_args_list):
+        if arg in ('-m', '--markers') and i + 1 < len(pytest_args_list):
+            indices_to_remove.extend([i, i + 1])
+            marker_values.append(pytest_args_list[i + 1])
+
+    # Reverse sort indices_to_remove to avoid index shifting issues during removal
+    indices_to_remove.sort(reverse=True)
+
+    # Remove the '-m' options and their values from pytest_args_list
+    for index in indices_to_remove:
+        pytest_args_list.pop(index)
+
+    # After removing the '-m' options and their values,
+    # convert the modified pytest_args_list back to a tuple
+    pytest_args = tuple(pytest_args_list)
+
+    # Construct the combined marker expression with extracted marker values and 'e2e'
+    combined_marker = " and ".join(marker_values) + " and e2e" if marker_values else "e2e"
+    base_command.extend(('-m', combined_marker))
    global_env_vars[EndToEndEnvVars.PARENT_PYTHON] = sys.executable

app.display_debug(f'Targets: {", ".join(targets)}')
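
A standalone sketch of the marker-merging behavior added above; merge_markers is a hypothetical helper written for illustration, not part of ddev:

def merge_markers(pytest_args: tuple) -> tuple:
    """Fold any user-supplied -m/--markers values into one e2e expression."""
    args = list(pytest_args)
    markers = []
    i = 0
    while i < len(args):
        if args[i] in ('-m', '--markers') and i + 1 < len(args):
            markers.append(args[i + 1])
            del args[i:i + 2]  # drop the option and its value
        else:
            i += 1
    combined = ' and '.join(markers + ['e2e'])
    return tuple(args), combined

# Example: a user-supplied `-m flaky` is combined with the implicit e2e
# marker rather than being passed to pytest twice.
assert merge_markers(('-m', 'flaky', '-k', 'smoke')) == (('-k', 'smoke'), 'flaky and e2e')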
