diff --git a/.cirrus.star b/.cirrus.star
index 00ea8d0c..9a96f2a9 100644
--- a/.cirrus.star
+++ b/.cirrus.star
@@ -6,7 +6,6 @@ load("cirrus", "fs")
 def main():
     return {
         "env": {
-            "IMG_SFX": fs.read("IMG_SFX").strip(),
-            "IMPORT_IMG_SFX": fs.read("IMPORT_IMG_SFX").strip()
+            "IMG_SFX": fs.read("IMG_SFX").strip()
         },
     }
diff --git a/IMG_SFX b/IMG_SFX
index f7c2459b..e952fcce 100644
--- a/IMG_SFX
+++ b/IMG_SFX
@@ -1 +1 @@
-20240620t153000z-f40f39d13
+20240701t155130z-f40f39d13
diff --git a/IMPORT_IMG_SFX b/IMPORT_IMG_SFX
deleted file mode 100644
index 4c4272bf..00000000
--- a/IMPORT_IMG_SFX
+++ /dev/null
@@ -1 +0,0 @@
-20240423t151529z-f40f39d13
diff --git a/Makefile b/Makefile
index a79f2549..5ad4a6ea 100644
--- a/Makefile
+++ b/Makefile
@@ -20,15 +20,14 @@ if_ci_else = $(if $(findstring true,$(CI)),$(1),$(2))
 
 export CENTOS_STREAM_RELEASE = 9
 
+# Warning: Beta Fedora releases are not supported. Verify EC2 AMI availability
+# here: https://fedoraproject.org/cloud/download
 export FEDORA_RELEASE = 40
 export PRIOR_FEDORA_RELEASE = 39
 
 # This should always be one-greater than $FEDORA_RELEASE (assuming it's actually the latest)
 export RAWHIDE_RELEASE = 41
 
-# See import_images/README.md
-export FEDORA_IMPORT_IMG_SFX = $(_IMPORT_IMG_SFX)
-
 # Automation assumes the actual release number (after SID upgrade)
 # is always one-greater than the latest DEBIAN_BASE_FAMILY (GCE image).
 export DEBIAN_RELEASE = 13
@@ -104,7 +103,6 @@ override _HLPFMT = "%-20s %s\n"
 
 # Suffix value for any images built from this make execution
 _IMG_SFX ?= $(file $@,$(shell date --utc +%Y%m%dt%H%M%Sz)-f$(FEDORA_RELEASE)f$(PRIOR_FEDORA_RELEASE)d$(subst .,,$(DEBIAN_RELEASE)))
-	@echo "$(file $@.tmp
-	mv $@.tmp $@
-
-# MD5 metadata value checked by AWS after upload + 5 retries.
-# Cache disabled to avoid sync. issues w/ vmimport service if
-# image re-uploaded.
-# TODO: Use sha256 from ..._CSUM_URL file instead of recalculating
-# https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
-# Avoid re-uploading unnecessarily
-.SECONDARY: $(_TEMPDIR)/%.uploaded
-$(_TEMPDIR)/%.uploaded: $(_TEMPDIR)/%.$(IMPORT_FORMAT) $(_TEMPDIR)/%.md5
-	-$(AWS) s3 rm --quiet s3://packer-image-import/%.$(IMPORT_FORMAT)
-	$(AWS) s3api put-object \
-	    --content-md5 "$(file < $(_TEMPDIR)/$*.md5)" \
-	    --content-encoding binary/octet-stream \
-	    --cache-control no-cache \
-	    --bucket packer-image-import \
-	    --key $*.$(IMPORT_FORMAT) \
-	    --body $(_TEMPDIR)/$*.$(IMPORT_FORMAT) > $@.tmp
-	mv $@.tmp $@
-
-# For whatever reason, the 'Format' value must be all upper-case.
-# Avoid creating unnecessary/duplicate import tasks
-.SECONDARY: $(_TEMPDIR)/%.import_task_id
-$(_TEMPDIR)/%.import_task_id: $(_TEMPDIR)/%.uploaded
-	$(AWS) ec2 import-snapshot \
-	    --disk-container Format=$(shell tr '[:lower:]' '[:upper:]'<<<"$(IMPORT_FORMAT)"),UserBucket="{S3Bucket=packer-image-import,S3Key=$*.$(IMPORT_FORMAT)}" > $@.tmp.json
-	@cat $@.tmp.json
-	jq -r -e .ImportTaskId $@.tmp.json > $@.tmp
-	mv $@.tmp $@
-
-# Avoid importing multiple snapshots for the same image
-.PRECIOUS: $(_TEMPDIR)/%.snapshot_id
-$(_TEMPDIR)/%.snapshot_id: $(_TEMPDIR)/%.import_task_id
-	bash import_images/wait_import_task.sh "$<" > $@.tmp
-	mv $@.tmp $@
-
-define _register_sed
-	sed -r \
-	    -e 's/@@@NAME@@@/$(1)/' \
-	    -e 's/@@@IMPORT_IMG_SFX@@@/$(_IMPORT_IMG_SFX)/' \
-	    -e 's/@@@ARCH@@@/$(2)/' \
-	    -e 's/@@@SNAPSHOT_ID@@@/$(3)/' \
-	    import_images/register.json.in \
-	    > $(4)
-endef
-
-$(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).register.json: $(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).snapshot_id import_images/register.json.in
-	$(call _register_sed,fedora-aws,x86_64,$(file <$<),$@)
-
-$(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).register.json: $(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).snapshot_id import_images/register.json.in
-	$(call _register_sed,fedora-aws-arm64,arm64,$(file <$<),$@)
-
-# Avoid multiple registrations for the same image
-.PRECIOUS: $(_TEMPDIR)/%.ami.id
-$(_TEMPDIR)/%.ami.id: $(_TEMPDIR)/%.register.json
-	$(AWS) ec2 register-image --cli-input-json "$$(<$<)" > $@.tmp.json
-	cat $@.tmp.json
-	jq -r -e .ImageId $@.tmp.json > $@.tmp
-	mv $@.tmp $@
-
-$(_TEMPDIR)/%.ami.name: $(_TEMPDIR)/%.register.json
-	jq -r -e .Name $< > $@.tmp
-	mv $@.tmp $@
-
-$(_TEMPDIR)/%.ami.json: $(_TEMPDIR)/%.ami.id $(_TEMPDIR)/%.ami.name
-	$(AWS) ec2 create-tags \
-	    --resources "$$(<$(_TEMPDIR)/$*.ami.id)" \
-	    --tags \
-	        Key=Name,Value=$$(<$(_TEMPDIR)/$*.ami.name) \
-	        Key=automation,Value=false
-	$(AWS) --output table ec2 describe-images --image-ids "$$(<$(_TEMPDIR)/$*.ami.id)" \
-	    | tee $@
-
-.PHONY: import_images
-import_images: $(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).ami.json $(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).ami.json import_images/manifest.json.in ## Import generic Fedora cloud images into AWS EC2.
-	sed -r \
-	    -e 's/@@@IMG_SFX@@@/$(_IMPORT_IMG_SFX)/' \
-	    -e 's/@@@CIRRUS_TASK_ID@@@/$(CIRRUS_TASK_ID)/' \
-	    import_images/manifest.json.in \
-	    > import_images/manifest.json
-	@echo "Image import(s) successful!"
-
 .PHONY: base_images
 # This needs to run in a virt/nested-virt capable environment
 base_images: base_images/manifest.json ## Create, prepare, and import base-level images into GCE.
diff --git a/README.md b/README.md
index 04af9eda..e8982e6c 100644
--- a/README.md
+++ b/README.md
@@ -52,7 +52,7 @@ However, all steps are listed below for completeness.
 For more information on the overall process of importing custom GCE VM
 Images, please [refer to the documentation](https://cloud.google.com/compute/docs/import/import-existing-image).
 For references to the latest pre-build AWS EC2 Fedora AMI's see [the
-upstream cloud page](https://alt.fedoraproject.org/cloud/).
+upstream cloud page](https://fedoraproject.org/cloud/download).
 
 For more information on the primary tool (*packer*) used for this
 process, please [see it's documentation page](https://www.packer.io/docs).
diff --git a/base_images/cloud.yml b/base_images/cloud.yml
index f1ea4691..81582054 100644
--- a/base_images/cloud.yml
+++ b/base_images/cloud.yml
@@ -26,8 +26,6 @@ variables:  # Empty value means it must be passed in on command-line
     PRIOR_FEDORA_IMAGE_URL: "{{env `PRIOR_FEDORA_IMAGE_URL`}}"
     PRIOR_FEDORA_CSUM_URL: "{{env `PRIOR_FEDORA_CSUM_URL`}}"
 
-    FEDORA_IMPORT_IMG_SFX: "{{env `FEDORA_IMPORT_IMG_SFX`}}"
-
     DEBIAN_RELEASE: "{{env `DEBIAN_RELEASE`}}"
     DEBIAN_BASE_FAMILY: "{{env `DEBIAN_BASE_FAMILY`}}"
@@ -109,20 +107,18 @@ builders:
   - &fedora-aws
     name: 'fedora-aws'
     type: 'amazon-ebs'
-    source_ami_filter: # Will fail if >1 or no AMI found
+    source_ami_filter:
+      # Many of these search filter values (like account ID and name) aren't publicized
+      # anywhere. They were found by examining AWS EC2 AMIs published/referenced from
+      # the AWS sections on https://fedoraproject.org/cloud/download
       owners:
-          # Docs are wrong, specifying the Account ID required to make AMIs private.
-          # The Account ID is hard-coded here out of expediency, since passing in
-          # more packer args from the command-line (in Makefile) is non-trivial.
-          - &accountid '449134212816'
-      # It's necessary to 'search' for the base-image by these criteria. If
-      # more than one image is found, Packer will fail the build (and display
-      # the conflicting AMI IDs).
+          - &fedora_accountid 125523088429
+      most_recent: true # Required b/c >1 search result likely to be returned
       filters: &ami_filters
           architecture: 'x86_64'
           image-type: 'machine'
-          is-public: 'false'
-          name: '{{build_name}}-i{{user `FEDORA_IMPORT_IMG_SFX`}}'
+          is-public: 'true'
+          name: 'Fedora-Cloud-Base*-{{user `FEDORA_RELEASE`}}-*us-east-1*'
           root-device-type: 'ebs'
          state: 'available'
           virtualization-type: 'hvm'
@@ -146,7 +142,6 @@
           volume_type: 'gp2'
           delete_on_termination: true
     # These are critical and used by security-polciy to enforce instance launch limits.
-
     tags: &awstags
         <<: *imgcpylabels
         # EC2 expects "Name" to be capitalized
@@ -160,7 +155,7 @@
     # This is necessary for security - The CI service accounts are not permitted
     # to use AMI's from any other account, including public ones.
     ami_users:
-      - *accountid
+      - &accountid '449134212816'
     ssh_username: 'fedora'
     ssh_clear_authorized_keys: true
    # N/B: Required Packer >= 1.8.0
@@ -171,7 +166,8 @@
     name: 'fedora-aws-arm64'
     source_ami_filter:
       owners:
-          - *accountid
+          - *fedora_accountid
+      most_recent: true # Required b/c >1 search result likely to be returned
       filters:
           <<: *ami_filters
           architecture: 'arm64'
diff --git a/imgobsolete/entrypoint.sh b/imgobsolete/entrypoint.sh
index c3425695..6ab547b6 100755
--- a/imgobsolete/entrypoint.sh
+++ b/imgobsolete/entrypoint.sh
@@ -11,7 +11,7 @@ set -eo pipefail
 # shellcheck source=imgts/lib_entrypoint.sh
 source /usr/local/bin/lib_entrypoint.sh
 
-req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX IMPORT_IMG_SFX
+req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX
 
 gcloud_init
 
@@ -159,10 +159,10 @@ for (( i=nr_amis ; i ; i-- )); do
         continue
     fi
 
-    # Any image matching the currently in-use IMG_SFX or IMPORT_IMG_SFX
+    # Any image matching the currently in-use IMG_SFX
     # must always be preserved. Values are defined in cirrus.yml
     # shellcheck disable=SC2154
-    if [[ "$name" =~ $IMG_SFX ]] || [[ "$name" =~ $IMPORT_IMG_SFX ]]; then
+    if [[ "$name" =~ $IMG_SFX ]]; then
         msg "Retaining current (latest) image $name | $tags"
         continue
     fi
diff --git a/imgprune/entrypoint.sh b/imgprune/entrypoint.sh
index e3ab7bef..ead67a06 100755
--- a/imgprune/entrypoint.sh
+++ b/imgprune/entrypoint.sh
@@ -11,7 +11,7 @@ set -e
 # shellcheck source=imgts/lib_entrypoint.sh
 source /usr/local/bin/lib_entrypoint.sh
 
-req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX IMPORT_IMG_SFX
+req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX
 
 gcloud_init
 
@@ -48,7 +48,7 @@ $GCLOUD compute images list --show-deprecated \
     # Any image matching the currently in-use IMG_SFX must always be preserved.
     # Values are defined in cirrus.yml
     # shellcheck disable=SC2154
-    if [[ "$name" =~ $IMG_SFX ]] || [[ "$name" =~ $IMPORT_IMG_SFX ]]; then
+    if [[ "$name" =~ $IMG_SFX ]]; then
         msg " Skipping current (latest) image $name"
         continue
     fi
@@ -91,9 +91,9 @@ for (( i=nr_amis ; i ; i-- )); do
         warn 0 " EC2 AMI ID '$ami_id' is missing a 'Name' tag"
     fi
 
-    # Any image matching the currently in-use IMG_SFX or IMPORT_IMG_SFX
+    # Any image matching the currently in-use IMG_SFX
     # must always be preserved.
-    if [[ "$name" =~ $IMG_SFX ]] || [[ "$name" =~ $IMPORT_IMG_SFX ]]; then
+    if [[ "$name" =~ $IMG_SFX ]]; then
         warn 0 " Retaining current (latest) image $name id $ami_id"
         $AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null
         continue
diff --git a/import_images/README.md b/import_images/README.md
deleted file mode 100644
index cfb680f9..00000000
--- a/import_images/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# Semi-manual image imports
-
-## Overview
-
-[Due to a bug in
-packer](https://github.com/hashicorp/packer-plugin-amazon/issues/264) and
-the sheer complexity of EC2 image imports, this process is impractical for
-full automation. It tends toward nearly always requiring supervision of a
-human:
-
-* There are multiple failure-points, some are not well reported to
-  the user by tools here or by AWS itself.
-* The upload of the image to s3 can be unreliable. Silently corrupting image
-  data.
-* The import-process is managed by a hosted AWS service which can be slow
-  and is occasionally unreliable.
-* Failure often results in one or more leftover/incomplete resources
-  (s3 objects, EC2 snapshots, and AMIs)
-
-## Requirements
-
-* You're generally familiar with the (manual)
-  [EC2 snapshot import process](https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-import-snapshot.html).
-* You are in possession of an AWS EC2 account, with the [IAM policy
-  `vmimport`](https://docs.aws.amazon.com/vm-import/latest/userguide/required-permissions.html#vmimport-role) attached.
-* You have "Access Key" and "Secret Access Key" values set in [a credentials
-  file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html).
-  These are only shown once, if lost a new "Access Key" needs to be created.
-  The format for `~/.aws/credentials` is very simple:
-
-  ```
-  [default]
-  aws_access_key_id =
-  aws_secret_access_key =
-  ```
-
-  The format for `~/.aws/config` is similarly simple:
-
-  ```
-  [defaults]
-  output = json
-  region = us-east-1
-  ```
-
-* Podman is installed and functional
-* At least 10gig free space under `/tmp`, more if there are failures / multiple runs.
-* *Network bandwidth sufficient for downloading and uploading many GBs of
-  data, potentially multiple times.*
-
-## Process
-
-Unless there is a problem with the current contents or age of the
-imported images, this process does not need to be followed. The
-normal PR-based build workflow can simply be followed as usual.
-This process is only needed to bring newly updated Fedora images into
-AWS to build CI images from. For example, due to a new Beta or GA release.
-
-***Note:*** Most of the steps below will happen within a container environment.
-Any exceptions are noted in the individual steps below with *[HOST]*
-
-1. *[HOST]* Edit the `Makefile`, update the Fedora release numbers
-   under the section
-   `##### Important image release and source details #####`
-1. *[HOST]* Run `make IMPORT_IMG_SFX`
-1. *[HOST]* Run
-   ```bash
-   $ make image_builder_debug \
-       GAC_FILEPATH=/dev/null \
-       AWS_SHARED_CREDENTIALS_FILE=/path/to/.aws/credentials
-   ```
-1. Run `make import_images` (or `make --jobs=4 import_images` if you're brave).
-1. The following steps should all occur successfully for each imported image.
-   1. Image is downloaded.
-   1. Image checksum is downloaded.
-   1. Image is verified against the checksum.
-   1. Image is converted to `VHDX` format.
-   1. The `VHDX` image is uploaded to the `packer-image-import` S3 bucket.
-   1. AWS `import-snapshot` process is started (uses AWS vmimport service)
-   1. Progress of snapshot import is monitored until completion or failure.
-   1. The imported snapshot is converted into an AMI
-   1. Essential tags are added to the AMI
-   1. Details ascii-table about the new AMI is printed on success.
-1. Assuming all image imports were successful, a final success message will be
-   printed by `make`.
-
-## Failure responses
-
-This list is not exhaustive, and only represents common/likely failures.
-Normally there is no need to exit the build container.
-
-* If image download fails, double-check any error output, run `make clean`
-  and retry.
-* If checksum validation fails,
-  run `make clean`.
-  Retry `make import_images`.
-* If s3 upload fails,
-  Confirm service availability,
-  retry `make import_images`.
-* If snapshot import fails with a `Disk validation failed` error,
-  Retry `make import_images`.
-* If snapshot import fails with non-validation error,
-  find snapshot in EC2 and delete it manually.
-  Retry `make import_images`.
-* If AMI registration fails, remove any conflicting AMIs *and* snapshots.
-  Retry `make import_images`.
-* If import was successful but AMI tagging failed, manually add
-  the required tags to AMI: `automation=false` and `Name=<name>-i${IMG_SFX}`.
-  Where `<name>` is `fedora-aws` or `fedora-aws-arm64`.
diff --git a/import_images/handle_image.sh b/import_images/handle_image.sh
deleted file mode 100644
index a075b802..00000000
--- a/import_images/handle_image.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-# This script is intended to be run by packer, usage under any other
-# environment may behave badly. Its purpose is to download a VM
-# image and a checksum file. Verify the image's checksum matches.
-# If it does, convert the downloaded image into the format indicated
-# by the first argument's `.extension`.
-#
-# The first argument is the file path and name for the output image,
-# the second argument is the image download URL (ending in a filename).
-# The third argument is the download URL for a checksum file containing
-# details necessary to verify vs filename included in image download URL.
-
-set -eo pipefail
-
-SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
-SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
-REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
-
-# shellcheck source=./lib.sh
-source "$REPO_DIRPATH/lib.sh"
-
-[[ "$#" -eq 3 ]] || \
-    die "Expected to be called with three arguments, not: $#"
-
-# Packer needs to provide the desired filename as it's unable to parse
-# a filename out of the URL or interpret output from this script.
-dest_dirpath=$(dirname "$1")
-dest_filename=$(basename "$1")
-dest_format=$(cut -d. -f2<<<"$dest_filename")
-src_url="$2"
-src_filename=$(basename "$src_url")
-cs_url="$3"
-
-req_env_vars dest_dirpath dest_filename dest_format src_url src_filename cs_url
-
-mkdir -p "$dest_dirpath"
-cd "$dest_dirpath"
-[[ -r "$src_filename" ]] || \
-    curl --fail --location -O "$src_url"
-echo "Downloading & verifying checksums in $cs_url"
-curl --fail --location "$cs_url" -o - | \
-    sha256sum --ignore-missing --check -
-echo "Converting '$src_filename' to ($dest_format format) '$dest_filename'"
-qemu-img convert "$src_filename" -O "$dest_format" "${dest_filename}"
diff --git a/import_images/manifest.json.in b/import_images/manifest.json.in
deleted file mode 100644
index 5fff1c1c..00000000
--- a/import_images/manifest.json.in
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "builds": [
-    {
-      "name": "fedora-aws",
-      "builder_type": "hamsterwheel",
-      "build_time": 0,
-      "files": null,
-      "artifact_id": "",
-      "packer_run_uuid": null,
-      "custom_data": {
-        "IMG_SFX": "fedora-aws-i@@@IMPORT_IMG_SFX@@@",
-        "STAGE": "import",
-        "TASK": "@@@CIRRUS_TASK_ID@@@"
-      }
-    },
-    {
-      "name": "fedora-aws-arm64",
-      "builder_type": "hamsterwheel",
-      "build_time": 0,
-      "files": null,
-      "artifact_id": "",
-      "packer_run_uuid": null,
-      "custom_data": {
-        "IMG_SFX": "fedora-aws-arm64-i@@@IMPORT_IMG_SFX@@@",
-        "STAGE": "import",
-        "TASK": "@@@CIRRUS_TASK_ID@@@"
-      }
-    }
-  ],
-  "last_run_uuid": "00000000-0000-0000-0000-000000000000"
-}
diff --git a/import_images/register.json.in b/import_images/register.json.in
deleted file mode 100644
index f888e25c..00000000
--- a/import_images/register.json.in
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-  "Name": "@@@NAME@@@-i@@@IMPORT_IMG_SFX@@@",
-  "VirtualizationType": "hvm",
-  "Architecture": "@@@ARCH@@@",
-  "EnaSupport": true,
-  "RootDeviceName": "/dev/sda1",
-  "BlockDeviceMappings": [
-    {
-      "DeviceName": "/dev/sda1",
-      "Ebs": {
-        "DeleteOnTermination": true,
-        "SnapshotId": "@@@SNAPSHOT_ID@@@",
-        "VolumeSize": 10,
-        "VolumeType": "gp2"
-      }
-    }
-  ]
-}
diff --git a/import_images/wait_import_task.sh b/import_images/wait_import_task.sh
deleted file mode 100644
index 353a9b52..00000000
--- a/import_images/wait_import_task.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-
-# This script is intended to be called by the main Makefile
-# to wait for and confirm successful import and conversion
-# of an uploaded image object from S3 into EC2. It expects
-# the path to a file containing the import task ID as the
-# first argument.
-#
-# If the import is successful, the snapshot ID is written
-# to stdout. Otherwise, all output goes to stderr, and
-# the script exits non-zero on failure or timeout. On
-# failure, the file containing the import task ID will
-# be removed.
-
-set -eo pipefail
-
-AWS="${AWS:-aws --output json --region us-east-1}"
-
-# The import/conversion process can take a LONG time, have observed
-# > 10 minutes on occasion. Normally, takes 2-5 minutes.
-SLEEP_SECONDS=10
-TIMEOUT_SECONDS=720
-
-TASK_ID_FILE="$1"
-
-tmpfile=$(mktemp -p '' tmp.$(basename ${BASH_SOURCE[0]}).XXXX)
-
-die() { echo "ERROR: ${1:-No error message provided}" > /dev/stderr; exit 1; }
-
-msg() { echo "${1:-No error message provided}" > /dev/stderr; }
-
-unset snapshot_id
-handle_exit() {
-    set +e
-    rm -f "$tmpfile" &> /dev/null
-    if [[ -n "$snapshot_id" ]]; then
-        msg "Success ($task_id): $snapshot_id"
-        echo -n "$snapshot_id" > /dev/stdout
-        return 0
-    fi
-    rm -f "$TASK_ID_FILE"
-    die "Timeout or other error reported while waiting for snapshot import"
-}
-trap handle_exit EXIT
-
-[[ -n "$AWS_SHARED_CREDENTIALS_FILE" ]] || \
-    die "\$AWS_SHARED_CREDENTIALS_FILE must not be unset/empty."
-
-[[ -r "$1" ]] || \
-    die "Can't read task id from file '$TASK_ID_FILE'"
-
-task_id=$(<$TASK_ID_FILE)
-
-msg "Waiting up to $TIMEOUT_SECONDS seconds for '$task_id' import. Checking progress every $SLEEP_SECONDS seconds."
-for (( i=$TIMEOUT_SECONDS ; i ; i=i-$SLEEP_SECONDS )); do \
-
-    # Sleep first, to give AWS time to start meaningful work.
-    sleep ${SLEEP_SECONDS}s
-
-    $AWS ec2 describe-import-snapshot-tasks \
-        --import-task-ids $task_id > $tmpfile
-
-    if ! st_msg=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.StatusMessage?' $tmpfile) && \
-       [[ -n $st_msg ]] && \
-       [[ ! "$st_msg" =~ null ]]
-    then
-        die "Unexpected result: $st_msg"
-    elif grep -Eiq '(error)|(fail)' <<<"$st_msg"; then
-        die "$task_id: $st_msg"
-    fi
-
-    msg "$task_id: $st_msg (${i}s remaining)"
-
-    # Why AWS you use StatusMessage && Status? Bad names! WHY!?!?!?!
-    if status=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.Status?' $tmpfile) && \
-       [[ "$status" == "completed" ]] && \
-       snapshot_id=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId?' $tmpfile)
-    then
-        msg "Import complete to: $snapshot_id"
-        break
-    else
-        unset snapshot_id
-    fi
-done