Skip to content

Commit

Permalink
Initial AZP configuration
Browse files Browse the repository at this point in the history
  • Loading branch information
gundalow committed Jan 29, 2021
1 parent 684f215 commit d3e57bf
Show file tree
Hide file tree
Showing 13 changed files with 534 additions and 3 deletions.
3 changes: 3 additions & 0 deletions .azure-pipelines/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
## Azure Pipelines Configuration

Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
181 changes: 181 additions & 0 deletions .azure-pipelines/azure-pipelines.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
---
# Azure Pipelines configuration for the community.aws collection.
# CI runs on pushes to main/stable-*, on pull requests, and nightly.

trigger:
  batch: true
  branches:
    include:
      - main
      - stable-*

pr:
  autoCancel: true
  branches:
    include:
      - main
      - stable-*

schedules:
  # Nightly run at 09:00 UTC; "always" forces a run even without new commits.
  - cron: '0 9 * * *'
    displayName: Nightly
    always: true
    branches:
      include:
        - main
        - stable-*

variables:
  # Checkout location expected by ansible-test for collections.
  - name: checkoutPath
    value: ansible_collections/community/aws
  # Branches on which scheduled builds collect code coverage.
  - name: coverageBranches
    value: main
  - name: pipelinesCoverage
    value: coverage
  # Script invoked by each test job.
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  # 0 = full clone, needed for accurate change detection.
  - name: fetchDepth
    value: 0

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:1.7.1

pool: Standard

stages:
  ### Sanity
  - stage: Sanity_devel
    displayName: Sanity devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test 1 {0}
          testFormat: devel/sanity/1/{0}
          targets:
            - test: ''
  - stage: Sanity_2_10
    displayName: Sanity 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test 1 {0}
          testFormat: 2.10/sanity/1/{0}
          targets:
            - test: ''
  - stage: Sanity_2_9
    displayName: Sanity 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Test 1 {0}
          testFormat: 2.9/sanity/1/{0}
          targets:
            - test: ''
  ### Units
  - stage: Units_devel
    displayName: Units devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/units/{0}/1
          targets:
            # Quoted so the YAML parser does not load these as floats.
            - test: '2.7'
            - test: '3.6'
            - test: '3.7'
            - test: '3.8'
            - test: '3.9'
  - stage: Units_2_10
    displayName: Units 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/units/{0}/1
          targets:
            - test: '2.7'
            - test: '3.6'
            - test: '3.7'
            - test: '3.8'
            - test: '3.9'
  - stage: Units_2_9
    displayName: Units 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/units/{0}/1
          targets:
            - test: '2.7'
            - test: '3.6'
            - test: '3.7'
            - test: '3.8'
  ### AWS Integration Tests
  - stage: AWS_devel
    displayName: AWS devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: devel/aws/{0}
          targets:
            - test: '2.7'
            - test: '3.7'
          groups:
            - 1
            - 2
            - 3
            - 4
  - stage: AWS_2_10
    displayName: AWS 2.10
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.10/aws/{0}
          targets:
            - test: '2.7'
            - test: '3.7'
          groups:
            - 1
            - 2
            - 3
            - 4
  - stage: AWS_2_9
    displayName: AWS 2.9
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          nameFormat: Python {0}
          testFormat: 2.9/aws/{0}
          targets:
            - test: '2.7'
            - test: '3.7'
          groups:
            - 1
            - 2
            - 3
            - 4

  ### Finally
  # Coverage aggregation runs even when test stages fail.
  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
      - Sanity_devel
      - Sanity_2_10
      - Sanity_2_9
      - Units_devel
      - Units_2_9
      - Units_2_10
      - AWS_devel
      - AWS_2_10
      - AWS_2_9
    jobs:
      - template: templates/coverage.yml
20 changes: 20 additions & 0 deletions .azure-pipelines/scripts/aggregate-coverage.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Aggregate code coverage results for later processing.

set -o pipefail -eu

agent_tmp="$1"  # Azure Pipelines agent temp directory

PATH="${PWD}/bin:${PATH}"

coverage_dir="${agent_tmp}/coverage/"
mkdir "${coverage_dir}"

common_options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --export "${coverage_dir}" "${common_options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
    # Doing so allows this script to work unmodified for multiple Ansible versions.
    ansible-test coverage analyze targets generate "${coverage_dir}coverage-analyze-targets.json" "${common_options[@]}"
fi
60 changes: 60 additions & 0 deletions .azure-pipelines/scripts/combine-coverage.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
#!/usr/bin/env python
"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shutil
import sys


def main():
    """Merge downloaded coverage artifacts into a single directory.

    Expects ``sys.argv[1]`` to be the directory containing the downloaded
    artifacts. Artifact directories must be named ``Coverage {attempt} {label}``;
    only the files from the highest attempt for each label are kept. Results
    are copied into the ansible-test output directory and Azure Pipelines
    variables are emitted via ``##vso`` commands on stdout.
    """
    source_directory = sys.argv[1]

    # ansible-test uses a different output location for collections vs. core.
    if '/ansible_collections/' in os.getcwd():
        output_path = "tests/output"
    else:
        output_path = "test/results"

    destination_directory = os.path.join(output_path, 'coverage')

    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    jobs = {}  # label -> highest attempt number seen
    count = 0

    for name in os.listdir(source_directory):
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)

        if not match:
            # Skip unrelated artifacts instead of crashing on them.
            continue

        label = match.group('label')
        attempt = int(match.group('attempt'))
        jobs[label] = max(attempt, jobs.get(label, 0))

    for label, attempt in jobs.items():
        name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
        source = os.path.join(source_directory, name)
        source_files = os.listdir(source)

        for source_file in source_files:
            source_path = os.path.join(source, source_file)
            # Suffix with the job label so files from different jobs cannot collide.
            destination_path = os.path.join(destination_directory, source_file + '.' + label)
            print('"%s" -> "%s"' % (source_path, destination_path))
            shutil.copyfile(source_path, destination_path)
            count += 1

    print('Coverage file count: %d' % count)
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
    print('##vso[task.setVariable variable=outputPath]%s' % output_path)


if __name__ == '__main__':
    main()
24 changes: 24 additions & 0 deletions .azure-pipelines/scripts/process-results.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Check the test results and set variables for use in later steps.

set -o pipefail -eu

if [[ "$PWD" == */ansible_collections/* ]]; then
    output_path="tests/output"
else
    output_path="test/results"
fi

echo "##vso[task.setVariable variable=outputPath]${output_path}"

# Set the given pipeline variable to "true" when any file matches the glob.
flag_if_any() {
    if compgen -G "$1" > /dev/null; then
        echo "##vso[task.setVariable variable=$2]true"
    fi
}

flag_if_any "${output_path}"'/junit/*.xml' haveTestResults
flag_if_any "${output_path}"'/bot/ansible-test-*' haveBotResults
flag_if_any "${output_path}"'/coverage/*' haveCoverageData
27 changes: 27 additions & 0 deletions .azure-pipelines/scripts/publish-codecov.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Upload code coverage reports to codecov.io.
# Multiple coverage files from multiple languages are accepted and aggregated after upload.
# Python coverage, as well as PowerShell and Python stubs can all be uploaded.

set -o pipefail -eu

output_path="$1"  # ansible-test output directory containing reports/coverage*.xml

# NOTE(security): this downloads and executes a remote script. Consider pinning
# a known-good version and verifying its checksum before execution.
# --fail makes curl exit non-zero on HTTP errors so (with `set -e`) the script
# aborts instead of later executing an HTML error page as shell code.
curl --silent --show-error --fail --retry 3 https://codecov.io/bash > codecov.sh

for file in "${output_path}"/reports/coverage*.xml; do
    name="${file}"
    name="${name##*/}"          # remove path
    name="${name##coverage=}"   # remove 'coverage=' prefix if present
    name="${name%.xml}"         # remove '.xml' suffix

    # Upload failures are reported but do not fail the build.
    bash codecov.sh \
        -f "${file}" \
        -n "${name}" \
        -X coveragepy \
        -X gcov \
        -X fix \
        -X search \
        -X xcode \
        || echo "Failed to upload code coverage report to codecov.io: ${file}"
done
15 changes: 15 additions & 0 deletions .azure-pipelines/scripts/report-coverage.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"

if ! ansible-test --help >/dev/null 2>&1; then
    # Install the devel version of ansible-test for generating code coverage reports.
    # This is only used by Ansible Collections, which are typically tested against
    # multiple Ansible versions (in separate jobs). Since a version of ansible-test
    # is required that can work with the output from multiple older releases, the
    # devel version is used.
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v
34 changes: 34 additions & 0 deletions .azure-pipelines/scripts/run-tests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Configure the test environment and run the tests.

set -o pipefail -eu

entry_point="$1"  # test entry point script to invoke
test="$2"         # test identifier passed through to the entry point
read -r -a coverage_branches <<< "$3" # space separated list of branches to run code coverage on for scheduled builds

# Exported (values assigned below) so the entry point script can read them.
export COMMIT_MESSAGE
export COMPLETE
export COVERAGE
export IS_PULL_REQUEST

# SYSTEM_PULLREQUEST_TARGETBRANCH is only set by Azure Pipelines for PR builds.
if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
    IS_PULL_REQUEST=true
    # PR builds check out a merge commit; HEAD^2 is the PR branch head.
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
else
    IS_PULL_REQUEST=
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
fi

COMPLETE=
COVERAGE=

# Scheduled (nightly) builds run the complete test suite and enable coverage
# collection on the branches listed in "$3".
if [ "${BUILD_REASON}" = "Schedule" ]; then
    COMPLETE=yes

    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
        COVERAGE=yes
    fi
fi

# Run the test, prefixing each output line with a relative timestamp.
"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
25 changes: 25 additions & 0 deletions .azure-pipelines/scripts/time-command.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#!/usr/bin/env python
"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import time


def main():
    """Prepend a relative MM:SS timestamp to each stdin line and echo it to stdout."""
    start = time.time()

    # reconfigure() only exists on real text streams from Python 3.7 onward;
    # guard so the script also works on older interpreters and wrapped streams
    # (the file's __future__ imports indicate Python 2 is still supported).
    for stream in (sys.stdin, sys.stdout):
        if hasattr(stream, 'reconfigure'):
            stream.reconfigure(errors='surrogateescape')

    for line in sys.stdin:
        seconds = time.time() - start
        # Flush after each line so timestamps stay accurate in live CI logs.
        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
        sys.stdout.flush()


if __name__ == '__main__':
    main()
Loading

0 comments on commit d3e57bf

Please sign in to comment.