diff --git a/.clang-format b/.clang-format
new file mode 100644
index 000000000000..2f14c8575147
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,2 @@
+# Allow unlimited column length, rather than 80. This prevents word-wrapping comments, which end up in Swagger.
+ColumnLimit: 0
\ No newline at end of file
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 000000000000..d943c48cf8bf
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,41 @@
+# See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.234.0/containers/debian/.devcontainer/base.Dockerfile
+
+ARG VARIANT="bullseye"
+FROM --platform=linux/amd64 mcr.microsoft.com/vscode/devcontainers/base:0-${VARIANT}
+
+ARG NEW_USERNAME=""
+ARG HOME=""
+ARG OLD_USERNAME="vscode"
+ARG VSCODE_SCRIPT_VERSION=""
+
+ARG PROTOC_ZIP="protoc-3.14.0-linux-x86_64.zip"
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+ && apt-get install -y apt-transport-https ca-certificates git sudo \
+ # use new user instead of vscode user
+ && usermod -l $NEW_USERNAME -d /home/$NEW_USERNAME -m $OLD_USERNAME \
+ && groupmod -n $NEW_USERNAME $OLD_USERNAME \
+ && echo $NEW_USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$NEW_USERNAME \
+ && chmod 0440 /etc/sudoers.d/$NEW_USERNAME \
+ # kubectl
+ && curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg \
+ && echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list \
+ && apt-get update \
+ && apt-get install -y kubectl \
+ # protobuf
+ && curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.14.0/${PROTOC_ZIP} \
+ && unzip -o ${PROTOC_ZIP} -d /usr/local bin/protoc \
+ && unzip -o ${PROTOC_ZIP} -d /usr/local 'include/*' \
+ && rm -f ${PROTOC_ZIP} \
+ && chmod 755 /usr/local/bin/protoc \
+ && chmod -R 755 /usr/local/include/ \
+ # k3d
+ && wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash \
+ # go
+ && bash -ec "$(curl -fsSL "https://raw.githubusercontent.com/microsoft/vscode-dev-containers/$VSCODE_SCRIPT_VERSION/script-library/go-debian.sh")" -- "1.18" "/usr/local/go" "$HOME/go" "automatic" "true" "false" \
+ # dind
+ && bash -ec "$(curl -fsSL "https://raw.githubusercontent.com/microsoft/vscode-dev-containers/$VSCODE_SCRIPT_VERSION/script-library/docker-in-docker-debian.sh")" -- "true" "automatic" "true" "20.10" "v1" \
+ # node
+ && bash -ec "$(curl -fsSL "https://raw.githubusercontent.com/microsoft/vscode-dev-containers/$VSCODE_SCRIPT_VERSION/script-library/node-debian.sh")" -- "/usr/local/share/nvm" "16" "automatic" "true" "true" \
+ # python
+ && bash -ec "$(curl -fsSL "https://raw.githubusercontent.com/microsoft/vscode-dev-containers/$VSCODE_SCRIPT_VERSION/script-library/python-debian.sh")" -- "3.9" \
+ && apt-get clean -y && rm -rf /var/lib/apt/lists/*
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 000000000000..49c7e50fd431
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,30 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
+// https://github.com/microsoft/vscode-dev-containers/tree/v0.234.0/containers/debian
+{
+ "name": "Debian",
+ "build": {
+ "dockerfile": "Dockerfile",
+ "args": {
+ "VARIANT": "bullseye",
+ "NEW_USERNAME": "${localEnv:USER}",
+ "HOME": "${localEnv:HOME}",
+ "VSCODE_SCRIPT_VERSION": "v0.234.0"
+ }
+ },
+
+ "settings": {},
+
+ "extensions": [],
+
+ "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
+
+ "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
+
+ "remoteUser": "${localEnv:USER}",
+ "features": {},
+
+ "workspaceMount": "source=${localWorkspaceFolder},target=${localEnv:HOME}/go/src/github.com/argoproj/argo-workflows,type=bind",
+ "workspaceFolder": "${localEnv:HOME}/go/src/github.com/argoproj/argo-workflows",
+
+ "postCreateCommand": "bash -i .devcontainer/startup.sh"
+}
diff --git a/.devcontainer/startup.sh b/.devcontainer/startup.sh
new file mode 100644
index 000000000000..2e3c1f39c4b6
--- /dev/null
+++ b/.devcontainer/startup.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+set -e
+
+sudo apt update
+sudo chown $USER:docker /var/run/docker.sock
+sudo chown -fR $USER:golang $GOPATH
+
+echo $'127.0.0.1 dex\n127.0.0.1 minio\n127.0.0.1 postgres\n127.0.0.1 mysql\n127.0.0.1 azurite' | sudo tee -a /etc/hosts
+
+if k3d cluster list | grep k3s-default;
+then
+ echo "skip k3s creation, k3s-default cluster already exist"
+else
+ k3d cluster create
+fi
+
+until k3d cluster start --wait ; do sleep 5 ; done
+k3d kubeconfig merge k3s-default --kubeconfig-merge-default --kubeconfig-switch-context
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 4c88b912f518..000000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-name: Reproducible bug report
-about: Create a reproducible bug report. Not for support requests.
-labels: ['bug', 'triage']
----
-
-
-
-## Checklist
-
-
-
-* [ ] Double-checked my configuration.
-* [ ] Tested using the latest version.
-* [ ] Used the Emissary executor.
-
-## Summary
-
-What happened/what you expected to happen?
-
-What version are you running?
-
-## Diagnostics
-
-Paste the smallest workflow that reproduces the bug. We must be able to run the workflow.
-
-```yaml
-
-```
-
-```bash
-# Logs from the workflow controller:
-kubectl logs -n argo deploy/workflow-controller | grep ${workflow}
-
-# If the workflow's pods have not been created, you can skip the rest of the diagnostics.
-
-# The workflow's pods that are problematic:
-kubectl get pod -o yaml -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
-
-# Logs from in your workflow's wait container, something like:
-kubectl logs -c wait -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
-```
-
----
-
-**Message from the maintainers**:
-
-Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
new file mode 100644
index 000000000000..4e6e657c7c0e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -0,0 +1,48 @@
+name: Reproducible bug report
+description: Create a reproducible bug report. Not for support requests.
+labels: [ bug ]
+body:
+ - type: checkboxes
+ id: terms
+ attributes:
+ label: Pre-requisites
+ options:
+ - label: I have double-checked my configuration
+ required: true
+        - label: I can confirm the issue exists when I tested with `:latest`
+ required: true
+ - label: I'd like to contribute the fix myself (see [contributing guide](https://github.com/argoproj/argo-workflows/blob/master/docs/CONTRIBUTING.md))
+ - type: textarea
+ id: description
+ attributes:
+ label: What happened/what you expected to happen?
+ validations:
+ required: true
+ - type: input
+ id: version
+ attributes:
+ label: Version
+ placeholder: e.g. v3.3.8 or latest
+ validations:
+ required: true
+ - type: textarea
+ id: failing-workflow
+ attributes:
+      label: Paste a small workflow that reproduces the issue. We must be able to run the workflow; don't enter a workflow that uses private images.
+ render: YAML
+ validations:
+ required: true
+ - type: textarea
+ id: controller-logs
+ attributes:
+ label: Logs from the workflow controller
+ value: kubectl logs -n argo deploy/workflow-controller | grep ${workflow}
+ validations:
+ required: true
+ - type: textarea
+ id: wait-logs
+ attributes:
+      label: Logs from your workflow's wait container
+ value: kubectl logs -c wait -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 259837a4a2f9..a8865c0269f1 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -10,6 +10,3 @@ contact_links:
- name: Chat on Slack
url: https://argoproj.github.io/community/join-slack
about: Maybe chatting with the community can help
- - name: 30m to talk anything Argo
- url: https://bit.ly/book-30m-with-argo-team
- about: Sign-up for 30m with the core Argo engineers
diff --git a/.github/ISSUE_TEMPLATE/regression.md b/.github/ISSUE_TEMPLATE/regression.md
deleted file mode 100644
index 18691b095482..000000000000
--- a/.github/ISSUE_TEMPLATE/regression.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-name: Regression report
-about: Create a regression report. Not for support requests.
-labels: ['bug', 'regression', 'triage']
----
-## Checklist
-
-
-
-* [ ] Double-checked my configuration.
-* [ ] Tested using the latest version.
-* [ ] Used the Emissary executor.
-
-## Summary
-
-What happened/what you expected to happen?
-
-What version are you running?
-
-
-## Diagnostics
-
-Paste the smallest workflow that reproduces the bug. We must be able to run the workflow.
-
-```yaml
-
-```
-
-```bash
-# Logs from the workflow controller:
-kubectl logs -n argo deploy/workflow-controller | grep ${workflow}
-
-# If the workflow's pods have not been created, you can skip the rest of the diagnostics.
-
-# The workflow's pods that are problematic:
-kubectl get pod -o yaml -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
-
-# Logs from in your workflow's wait container, something like:
-kubectl logs -c wait -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
-```
-
----
-
-**Message from the maintainers**:
-
-Impacted by this regression? Give it a 👍. We prioritise the issues with the most 👍.
diff --git a/.github/ISSUE_TEMPLATE/regression.yaml b/.github/ISSUE_TEMPLATE/regression.yaml
new file mode 100644
index 000000000000..cd14eef0dd2d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/regression.yaml
@@ -0,0 +1,49 @@
+name: Regression report
+description: Create a regression report. Not for support requests.
+labels: [ bug, regression ]
+
+body:
+ - type: checkboxes
+ id: terms
+ attributes:
+ label: Pre-requisites
+ options:
+ - label: I have double-checked my configuration
+ required: true
+        - label: I can confirm the issue exists when I tested with `:latest`
+ required: true
+ - label: I'd like to contribute the fix myself (see [contributing guide](https://github.com/argoproj/argo-workflows/blob/master/docs/CONTRIBUTING.md))
+ - type: textarea
+ id: description
+ attributes:
+ label: What happened/what you expected to happen?
+ validations:
+ required: true
+ - type: input
+ id: version
+ attributes:
+ label: Version
+ placeholder: e.g. v3.3.8 or latest
+ validations:
+ required: true
+ - type: textarea
+ id: failing-workflow
+ attributes:
+      label: Paste a small workflow that reproduces the issue. We must be able to run the workflow; don't enter a workflow that uses private images.
+ render: YAML
+ validations:
+ required: true
+ - type: textarea
+ id: controller-logs
+ attributes:
+ label: Logs from the workflow controller
+ value: kubectl logs -n argo deploy/workflow-controller | grep ${workflow}
+ validations:
+ required: true
+ - type: textarea
+ id: wait-logs
+ attributes:
+      label: Logs from your workflow's wait container
+ value: kubectl logs -c wait -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
+ validations:
+ required: true
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index af8fd7f28499..caa13f04034f 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -3,14 +3,26 @@ updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
- interval: "daily"
+ interval: "weekly"
+ day: "saturday"
ignore:
- dependency-name: k8s.io/*
- dependency-name: github.com/grpc-ecosystem/*
- dependency-name: google.golang.org/grpc
- open-pull-requests-limit: 2
+ open-pull-requests-limit: 10
- package-ecosystem: "github-actions"
directory: "/"
schedule:
- interval: "daily"
+ interval: "weekly"
+ day: "saturday"
+
+ - package-ecosystem: "npm"
+ directory: "/ui"
+ schedule:
+ interval: "weekly"
+ day: "saturday"
+ ignore:
+ - dependency-name: raw-loader
+ - dependency-name: style-loader
+ open-pull-requests-limit: 10
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 18c987fb2a65..d6f5b1b9b7b9 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -16,7 +16,7 @@ If you did not do this, reset all your commit and replace them with a single com
```
git reset HEAD~1 ;# change 1 to how many commits you made
-git commit --sign-off -m 'feat: my feat. Fixes #1234'
+git commit --signoff -m 'feat: my feat. Fixes #1234'
```
When creating your PR:
diff --git a/.github/stale.yml b/.github/stale.yml
index 9b0df8afb1a9..069f95dd7e5f 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -1,12 +1,11 @@
# https://probot.github.io/apps/stale/
# Number of days of inactivity before an issue becomes stale
-daysUntilStale: 7
+daysUntilStale: 14
# Number of days of inactivity before a stale issue is closed
-daysUntilClose: 3
+daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels:
- enhancement
- - mentoring
- pinned
- security
- tech-debt
@@ -15,7 +14,9 @@ staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
- recent activity. It will be closed if no further activity occurs. Thank you
- for your contributions.
+ recent activity. It will be closed if no further activity occurs. If this is a mentoring request,
+ please provide an update here. Thank you for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
-closeComment: false
\ No newline at end of file
+closeComment: >
+ This issue has been closed due to inactivity. Feel free to re-open if you
+ still encounter this issue.
diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml
index 0f3a8e4df657..e9eea357dbe1 100644
--- a/.github/workflows/changelog.yaml
+++ b/.github/workflows/changelog.yaml
@@ -5,8 +5,14 @@ on:
tags:
- v*
- "!v0.0.0"
+permissions:
+ contents: read
+
jobs:
generate_changelog:
+ permissions:
+ contents: write # for peter-evans/create-pull-request to create branch
+ pull-requests: write # for peter-evans/create-pull-request to create a PR
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-latest
name: Generate changelog
diff --git a/.github/workflows/ci-build.yaml b/.github/workflows/ci-build.yaml
index e046e7af5c93..f308bf335cd1 100644
--- a/.github/workflows/ci-build.yaml
+++ b/.github/workflows/ci-build.yaml
@@ -13,6 +13,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
+permissions:
+ contents: read
+
jobs:
tests:
name: Unit Tests
@@ -20,9 +23,9 @@ jobs:
timeout-minutes: 8
steps:
- uses: actions/checkout@v3
- - uses: actions/setup-go@v2
+ - uses: actions/setup-go@v3
with:
- go-version: "1.17"
+ go-version: "1.18"
# https://github.com/actions/cache/blob/main/examples.md#go---modules
- uses: actions/cache@v3
with:
@@ -32,7 +35,7 @@ jobs:
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- - run: make test STATIC_FILES=false GOTEST='go test -covermode=atomic -coverprofile=coverage.out'
+ - run: make test STATIC_FILES=false GOTEST='go test -p 20 -covermode=atomic -coverprofile=coverage.out'
# engineers just ignore this in PRs, so lets not even run it
- run: bash <(curl -s https://codecov.io/bash)
if: github.ref == 'refs/heads/master'
@@ -44,7 +47,7 @@ jobs:
# needs: [ lint ]
steps:
- uses: actions/checkout@v3
- - uses: docker/setup-buildx-action@v1
+ - uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
with:
@@ -62,7 +65,7 @@ jobs:
--output=type=docker \
.
- run: docker save quay.io/argoproj/argoexec:latest > /tmp/argoexec_image.tar
- - uses: actions/upload-artifact@v2
+ - uses: actions/upload-artifact@v3
with:
name: argoexec
path: /tmp/argoexec_image.tar
@@ -78,50 +81,46 @@ jobs:
e2e-tests:
name: E2E Tests
runs-on: ubuntu-latest
- timeout-minutes: 20
+ timeout-minutes: 25
needs: [ tests, argoexec-image ]
env:
KUBECONFIG: /home/runner/.kubeconfig
strategy:
fail-fast: false
- max-parallel: 4
matrix:
include:
- - test: test-plugins
- containerRuntimeExecutor: emissary
- profile: plugins
+ - test: test-executor
+ profile: minimal
+ - test: test-corefunctional
+ profile: minimal
- test: test-functional
- containerRuntimeExecutor: emissary
profile: minimal
- test: test-api
- containerRuntimeExecutor: emissary
profile: mysql
- test: test-cli
- containerRuntimeExecutor: emissary
profile: mysql
- test: test-cron
- containerRuntimeExecutor: emissary
profile: minimal
- test: test-examples
- containerRuntimeExecutor: emissary
profile: minimal
- - test: test-executor
- containerRuntimeExecutor: emissary
- profile: minimal
- - test: test-executor
- containerRuntimeExecutor: docker
- profile: minimal
- - test: test-executor
- containerRuntimeExecutor: kubelet
+ - test: test-plugins
+ profile: plugins
+ - test: test-java-sdk
profile: minimal
- - test: test-executor
- containerRuntimeExecutor: pns
+ - test: test-python-sdk
profile: minimal
steps:
- uses: actions/checkout@v3
- - uses: actions/setup-go@v2
+ - uses: actions/setup-go@v3
+ with:
+ go-version: "1.18"
+ - uses: actions/setup-java@v3
with:
- go-version: "1.17"
+ java-version: '8'
+ distribution: adopt
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
- uses: actions/cache@v3
with:
path: |
@@ -139,7 +138,7 @@ jobs:
echo " user:" >> $KUBECONFIG
echo " token: xxxxxx" >> $KUBECONFIG
until kubectl cluster-info ; do sleep 10s ; done
- - uses: actions/download-artifact@v2
+ - uses: actions/download-artifact@v3
with:
name: argoexec
path: /tmp
@@ -150,14 +149,21 @@ jobs:
echo '127.0.0.1 minio' | sudo tee -a /etc/hosts
echo '127.0.0.1 postgres' | sudo tee -a /etc/hosts
echo '127.0.0.1 mysql' | sudo tee -a /etc/hosts
- - run: make install PROFILE=${{matrix.profile}} E2E_EXECUTOR=${{matrix.containerRuntimeExecutor}} STATIC_FILES=false
+ echo '127.0.0.1 azurite' | sudo tee -a /etc/hosts
+ - run: make install PROFILE=${{matrix.profile}} STATIC_FILES=false
- run: make controller $(go env GOPATH)/bin/goreman STATIC_FILES=false
- run: make cli STATIC_FILES=false
- if: ${{matrix.test == 'test-api' || matrix.test == 'test-cli'}}
- - run: make start PROFILE=${{matrix.profile}} E2E_EXECUTOR=${{matrix.containerRuntimeExecutor}} AUTH_MODE=client STATIC_FILES=false LOG_LEVEL=info API=${{matrix.test == 'test-api' || matrix.test == 'test-cli'}} UI=false > /tmp/argo.log 2>&1 &
+ if: ${{matrix.test == 'test-api' || matrix.test == 'test-cli' || matrix.test == 'test-java-sdk' || matrix.test == 'test-python-sdk'}}
+ - run: make start PROFILE=${{matrix.profile}} AUTH_MODE=client STATIC_FILES=false LOG_LEVEL=info API=${{matrix.test == 'test-api' || matrix.test == 'test-cli' || matrix.test == 'test-java-sdk' || matrix.test == 'test-python-sdk'}} UI=false AZURE=true > /tmp/argo.log 2>&1 &
- run: make wait
timeout-minutes: 4
- - run: make ${{matrix.test}} E2E_TIMEOUT=1m STATIC_FILES=false
+ - name: make ${{matrix.test}}
+ # https://github.com/marketplace/actions/retry-step
+ uses: nick-fields/retry@v2.8.1
+ with:
+ timeout_minutes: 20
+ max_attempts: 2
+ command: make ${{matrix.test}} E2E_SUITE_TIMEOUT=20m STATIC_FILES=false AZURE=true
- if: ${{ failure() }}
run: |
[ -e /tmp/argo.log ] && cat /tmp/argo.log
@@ -171,9 +177,9 @@ jobs:
GOPATH: /home/runner/go
steps:
- uses: actions/checkout@v3
- - uses: actions/setup-go@v2
+ - uses: actions/setup-go@v3
with:
- go-version: "1.17"
+ go-version: "1.18"
- uses: actions/cache@v3
with:
path: |
@@ -209,12 +215,18 @@ jobs:
GOPATH: /home/runner/go
steps:
- uses: actions/checkout@v3
+ - uses: actions/setup-go@v3
with:
- fetch-depth: 0
- - run: cp server/static/files.go.stub server/static/files.go
- - uses: golangci/golangci-lint-action@v2
+ go-version: "1.18"
+ - uses: actions/cache@v3
with:
- version: v1.42.0
+ path: |
+ ~/.cache/go-build
+ ~/go/pkg/mod
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go-
+ - run: make lint STATIC_FILES=false
- run: git diff --exit-code
ui:
@@ -237,3 +249,7 @@ jobs:
- run: yarn --cwd ui test
- run: yarn --cwd ui lint
- run: git diff --exit-code
+ # check to see if it'll start (but not if it'll render)
+ - run: yarn --cwd ui start &
+ - run: until curl http://localhost:8080 > /dev/null ; do sleep 10s ; done
+ timeout-minutes: 1
diff --git a/.github/workflows/dependabot-reviewer.yml b/.github/workflows/dependabot-reviewer.yml
new file mode 100644
index 000000000000..8110f0e7a704
--- /dev/null
+++ b/.github/workflows/dependabot-reviewer.yml
@@ -0,0 +1,28 @@
+# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions
+name: Approve and enable auto-merge for dependabot
+on: pull_request
+
+permissions:
+ pull-requests: write
+ contents: write
+
+jobs:
+ review:
+ runs-on: ubuntu-latest
+ if: ${{ github.actor == 'dependabot[bot]' && github.repository == 'argoproj/argo-workflows'}}
+ steps:
+ - name: Dependabot metadata
+ id: metadata
+ uses: dependabot/fetch-metadata@v1.3.3
+ with:
+ github-token: "${{ secrets.GITHUB_TOKEN }}"
+ - name: Approve PR
+ run: gh pr review --approve "$PR_URL"
+ env:
+ PR_URL: ${{github.event.pull_request.html_url}}
+ GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
+ - name: Enable auto-merge for Dependabot PRs
+ run: gh pr merge --auto --squash "$PR_URL"
+ env:
+ PR_URL: ${{github.event.pull_request.html_url}}
+ GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
\ No newline at end of file
diff --git a/.github/workflows/gh-pages.yaml b/.github/workflows/gh-pages.yaml
index 9fab807cba9c..dc08efb64d51 100644
--- a/.github/workflows/gh-pages.yaml
+++ b/.github/workflows/gh-pages.yaml
@@ -1,30 +1,46 @@
-name: Deploy
+name: Docs
on:
push:
branches:
- master
+ pull_request:
+ branches:
+ - master
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
+permissions:
+ contents: read
+
jobs:
- deploy:
- if: github.repository == 'argoproj/argo-workflows'
+ docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- - uses: actions/setup-python@v3
+ - uses: actions/setup-python@v4
with:
python-version: 3.9
- - uses: actions/setup-go@v1
+ - uses: actions/setup-go@v3
+ with:
+ go-version: '1.18'
+ - uses: actions/setup-node@v3
+ with:
+ node-version: "16"
+ # Use the same make target both locally and on CI to make it easier to debug failures.
+ - run: make docs
+ # If markdownlint fixes issues, files will be changed. If so, fail the build.
+ - run: git diff --exit-code
+ # Upload the site so reviewers see it.
+ - uses: actions/upload-artifact@v3
with:
- go-version: '1.17'
- - run: pip install mkdocs==1.2.3 mkdocs_material==8.1.9
- - run: mkdocs build
- - run: make parse-examples
+ name: docs
+ path: site
+ if-no-files-found: error
- uses: peaceiris/actions-gh-pages@v2.9.0
+ if: github.repository == 'argoproj/argo-workflows' && github.ref == 'refs/heads/master'
env:
PERSONAL_TOKEN: ${{ secrets.PERSONAL_TOKEN }}
PUBLISH_BRANCH: gh-pages
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 3cee95e17b27..cdfeb0846a38 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -16,6 +16,9 @@ defaults:
run:
shell: bash
+permissions:
+ contents: read
+
jobs:
build-linux-amd64:
name: Build & push linux/amd64
@@ -29,7 +32,7 @@ jobs:
- uses: actions/checkout@v3
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
@@ -41,13 +44,13 @@ jobs:
${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx-
- name: Docker Login
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Docker Login
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
@@ -95,12 +98,12 @@ jobs:
- uses: actions/checkout@v3
- name: Set up QEMU
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
- name: Cache Docker layers
uses: actions/cache@v3
@@ -112,13 +115,13 @@ jobs:
${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx-
- name: Docker Login
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Docker Login
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
@@ -317,6 +320,8 @@ jobs:
done
publish-release:
+ permissions:
+ contents: write # for softprops/action-gh-release to create GitHub release
runs-on: ubuntu-latest
if: github.repository == 'argoproj/argo-workflows'
needs: [ push-images, test-images-linux-amd64, test-images-windows ]
@@ -327,9 +332,9 @@ jobs:
- uses: actions/setup-node@v3
with:
node-version: "16"
- - uses: actions/setup-go@v2
+ - uses: actions/setup-go@v3
with:
- go-version: "1.17"
+ go-version: "1.18"
- uses: actions/cache@v3
with:
path: ui/node_modules
diff --git a/.github/workflows/sdks.yaml b/.github/workflows/sdks.yaml
index cf6036f93b21..2d7f1ab82db4 100644
--- a/.github/workflows/sdks.yaml
+++ b/.github/workflows/sdks.yaml
@@ -3,8 +3,16 @@ on:
push:
tags:
- v*
+
+permissions:
+ contents: read
+
jobs:
sdk:
+ permissions:
+      # contents needs write access (not just read) to create releases
+      contents: write
+      packages: write # for publishing packages
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-latest
name: Publish SDK
diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml
index d986eaa70b4a..903e150218ea 100644
--- a/.github/workflows/snyk.yml
+++ b/.github/workflows/snyk.yml
@@ -2,6 +2,9 @@ name: Snyk
on:
schedule:
- cron: "30 2 * * *"
+permissions:
+ contents: read
+
jobs:
# we do not scan images here, they're scanned here: https://app.snyk.io/org/argoproj/projects
diff --git a/.gitignore b/.gitignore
index 4792506f8cb7..012b8b068077 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,3 +40,7 @@ git-ask-pass.sh
sdks/python/client/dist/*
/v3/
/cmd/argoexec/commands/test.txt
+
+# Do not commit rendered installation manifests since they are misleading to users.
+manifests/install.yaml
+manifests/namespace-install.yaml
diff --git a/.golangci.yml b/.golangci.yml
index b2e9b37a1acd..8eb46041a64a 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,6 +1,5 @@
# https://golangci-lint.run/usage/quick-start/
run:
- concurrency: 4
timeout: 8m
skip-dirs:
- dist
@@ -20,6 +19,7 @@ run:
- cron
- executor
- examples
+ - corefunctional
- functional
- plugins
linters:
@@ -31,7 +31,8 @@ linters:
# only minor issues
# - errorlint
- exportloopref
- - gci
+  # seems to have bugs in recent versions, and is also slow
+ # - gci
- gosec
- gosimple
- govet
diff --git a/.markdownlint.yaml b/.markdownlint.yaml
new file mode 100644
index 000000000000..261ef7e65178
--- /dev/null
+++ b/.markdownlint.yaml
@@ -0,0 +1,5 @@
+# do not enforce line length
+MD013: false
+# mkdocs uses 4-space indentation
+MD007:
+ indent: 4
diff --git a/.mlc_config.json b/.mlc_config.json
new file mode 100644
index 000000000000..946725d2fec4
--- /dev/null
+++ b/.mlc_config.json
@@ -0,0 +1,11 @@
+{
+ "ignorePatterns": [
+ {
+ "pattern": ".*localhost.*"
+ }
+ ],
+ "aliveStatusCodes": [
+ 200,
+ 429
+ ]
+}
diff --git a/.spelling b/.spelling
new file mode 100644
index 000000000000..507e9340a9a2
--- /dev/null
+++ b/.spelling
@@ -0,0 +1,216 @@
+# markdown-spellcheck spelling configuration file
+# Format - lines beginning # are comments
+# global dictionary is at the start, file overrides afterwards
+# one word per line, to define a file override use ' - filename'
+# where filename is relative to this configuration file
+000s
+0s
+100Mi
+100s
+10h
+10s
+120s
+120sec
+1Gi
+1Mi
+1h
+1m
+2Gi
+2h
+30s
+3min
+3s
+4Gi
+4xx
+512Mi
+5m
+5xx
+8Ki
+90m
+Alexandre
+Alibaba
+Ang
+Anthos
+ArgoLabs
+Artifactory
+BlackRock
+Breitgand
+Couler
+DataDog
+Dataflow
+DeleteObject
+DevOps
+Dex
+EtcD
+EventRouter
+FailFast
+GSoC
+GitOps
+Github
+Golang
+Grafana
+Grammarly
+Hadoop
+Heptio
+Homebrew
+InsideBoard
+Invocators
+Istio
+J.P.
+Jemison
+JetBrains
+KNative
+Katacoda
+Kerberos
+KubectlExec
+Kubeflow
+Kustomize
+Lifecycle-Hook
+LitmusChaos
+metadata
+MLOps
+MinIO
+Minikube
+MySQL
+Nagal
+Nano
+Nginx
+Node.JS.
+OAuth
+OAuth2
+Okta
+parameterize
+parameterized
+parameterizing
+PDBs
+PProf
+PVCs
+Peixuan
+Ploomber
+Postgres
+Roadmap
+RoleBinding
+s3
+SDKs
+Sharding
+Singer.io
+Snyk
+Sumit
+Tekton
+Tianchu
+Traefik
+TripAdvisor
+VSCode
+Valasek
+Webhooks
+Welch
+`CronTab`
+`OnFailure`
+a.m.
+alexec
+anded
+apis
+architecting
+argo
+args
+async
+auth
+backend
+blkperl
+boolean
+booleans
+buildkit
+config
+cpu
+cron
+daemoned
+dev-container
+dinever
+dropdown
+e.g.
+e2e
+entrypoint
+enum
+env
+errored
+expr
+fibonacci
+finalizer
+govaluate
+gzipped
+i.e.
+instantiator
+instantiators
+jenkins
+k3d
+k3s
+k8s-jobs
+kube
+kubelet
+kubernetes
+localhost
+memoization
+memoized
+memoizing
+mentee
+mentees
+minikube
+mutex
+namespace
+namespaces
+natively
+p.m.
+params
+pre-commit
+rc2
+repo
+roadmap
+runtime
+runtimes
+sandboxed
+sarabala1979
+simster7
+stateful
+stderr
+tczhao
+terrytangyuan
+themself
+un-reconciled
+untracked
+v1
+v1.0
+v1.1
+v1.2
+v1.3
+v2
+v2.10
+v2.11
+v2.12
+v2.23.0
+v2.4
+v2.5
+v2.6
+v2.7
+v2.7.2
+v2.8
+v2.9
+v3.0
+v3.0.0
+v3.1
+v3.1.4
+v3.2
+v3.2.
+v3.3
+v3.3.
+v3.4
+v3.4.
+validator
+versioning
+webHDFS
+webhook
+webhooks
+workflow-controller-configmap
+yaml
+idempotence
+kube-scheduler
+kube-apiserver
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 081daf69e99c..90cf195d0349 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,733 @@
# Changelog
+## v3.4.0 (2022-09-18)
+
+ * [047952afd](https://github.com/argoproj/argo-workflows/commit/047952afd539d06cae2fd6ba0b608b19c1194bba) fix: SDK workflow file
+ * [97328f1ed](https://github.com/argoproj/argo-workflows/commit/97328f1ed3885663b780f43e6b553208ecba4d3c) chore(deps): bump classnames and @types/classnames in /ui (#9603)
+ * [2dac194a5](https://github.com/argoproj/argo-workflows/commit/2dac194a52acb46c5535e5f552fdf7fd520d0f4e) chore(deps-dev): bump @babel/core from 7.19.0 to 7.19.1 in /ui (#9602)
+ * [47544cc02](https://github.com/argoproj/argo-workflows/commit/47544cc02a8663b5b69e4c213a382ff156deb63e) feat: Support retrying complex workflows with nested group nodes (#9499)
+ * [30bd96b4c](https://github.com/argoproj/argo-workflows/commit/30bd96b4c030fb728a3da78e0045982bf778d554) fix: Error message if cronworkflow failed to update (#9583)
+ * [fc5e11cd3](https://github.com/argoproj/argo-workflows/commit/fc5e11cd37f51e36517f7699c23afabac4f08528) chore(deps-dev): bump webpack-dev-server from 4.10.1 to 4.11.0 in /ui (#9567)
+ * [ace179804](https://github.com/argoproj/argo-workflows/commit/ace179804996edc0d356bff257a980e60b9bc5a0) docs(dev-container): Fix buildkit doc for local dev (#9580)
+
+### Contributors
+
+ * JM
+ * Saravanan Balasubramanian
+ * Yuan Tang
+ * dependabot[bot]
+
+## v3.4.0-rc4 (2022-09-10)
+
+ * [dee4ea5b0](https://github.com/argoproj/argo-workflows/commit/dee4ea5b0be2408e13af7745db910d0130e578f2) chore(deps-dev): bump @babel/core from 7.18.13 to 7.19.0 in /ui (#9566)
+ * [8172b493d](https://github.com/argoproj/argo-workflows/commit/8172b493d649c20b0b72ae56cf5b69bd2fa5ed8d) chore(deps-dev): bump sass from 1.54.8 to 1.54.9 in /ui (#9565)
+ * [68a793586](https://github.com/argoproj/argo-workflows/commit/68a793586ed8154f71d156e9daa8055e7ea8492e) chore(deps-dev): bump @babel/preset-env from 7.18.10 to 7.19.0 in /ui (#9562)
+ * [e1d8387fa](https://github.com/argoproj/argo-workflows/commit/e1d8387fa7a9c0648c548e2809f61eb77a802537) chore(deps-dev): bump babel-jest from 29.0.1 to 29.0.2 in /ui (#9564)
+ * [3950f8c1c](https://github.com/argoproj/argo-workflows/commit/3950f8c1c12ff7451b3e1be96b2ba108025a9677) chore(deps): bump google.golang.org/api from 0.94.0 to 0.95.0 (#9561)
+ * [8310bdbc9](https://github.com/argoproj/argo-workflows/commit/8310bdbc9d07f87640d944b949e465a044148368) chore(deps): bump github.com/coreos/go-oidc/v3 from 3.3.0 to 3.4.0 (#9560)
+ * [baaa8d0a9](https://github.com/argoproj/argo-workflows/commit/baaa8d0a9e90f5234ce7d02cbc33f8756a3ad4da) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.35 to 7.0.36 (#9558)
+ * [aab923452](https://github.com/argoproj/argo-workflows/commit/aab92345267e9e0562ee8495f49ac6d80e06ae28) chore(deps): bump github.com/spf13/viper from 1.12.0 to 1.13.0 (#9559)
+ * [ec7c210c9](https://github.com/argoproj/argo-workflows/commit/ec7c210c9743d8f85d528d5593bc7390d73ff534) fix: use urlencode instead of htmlencode to sanitize url (#9538)
+ * [3a3f15997](https://github.com/argoproj/argo-workflows/commit/3a3f1599718453ca79800cfc28f6631ee780911b) fix: enable workflow-aggregate-roles to treat workflowtaskresults. Fixes #9545 (#9546)
+ * [9d66b69f0](https://github.com/argoproj/argo-workflows/commit/9d66b69f0bca92d7ef0c9aa67e87a2e334797530) fix: for pod that's been GC'ed we need to get the log from the artifact (#9540)
+ * [34a4e48c3](https://github.com/argoproj/argo-workflows/commit/34a4e48c3f412ba89cd0491469d13a14fdaf51b3) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.34 to 7.0.35 (#9502)
+ * [ef6bd5710](https://github.com/argoproj/argo-workflows/commit/ef6bd5710e5780afe40321f4d384471d9e02197c) fix: Capture exit code of signaled containers. Fixes #9415 (#9523)
+ * [6e2f15f9e](https://github.com/argoproj/argo-workflows/commit/6e2f15f9eea82f1344f139800869f9e7fd255b04) feat: added support for DAG task name as variables in workflow templates (#9387)
+ * [f27475feb](https://github.com/argoproj/argo-workflows/commit/f27475feb850dc43e07c3c5215cc9638947f0859) fix: default to 'main' container in Sensor logs. Fixes #9459 (#9438)
+ * [c00fbf88f](https://github.com/argoproj/argo-workflows/commit/c00fbf88f15104673b05ba5e109a72fed84dd38e) feat: Add node ID to node info panel (#9500)
+ * [2a80a2c1a](https://github.com/argoproj/argo-workflows/commit/2a80a2c1a9b0a2370f547492ef9168ee583077f5) fix: revert accidental commit in UI logs viewer (#9515)
+ * [b9d02cfd5](https://github.com/argoproj/argo-workflows/commit/b9d02cfd59c72b2bc8e437e6591ca4a145a3eb9b) chore(deps): bump cloud.google.com/go/storage from 1.25.0 to 1.26.0 (#9506)
+ * [bd9fc66c5](https://github.com/argoproj/argo-workflows/commit/bd9fc66c52c8e14123e5d7a4a7829023a072da9f) chore(deps-dev): bump @fortawesome/fontawesome-free from 6.1.2 to 6.2.0 in /ui (#9513)
+ * [9004f5e26](https://github.com/argoproj/argo-workflows/commit/9004f5e263a4ead8a5be4a4a09db03064eb1d453) chore(deps): bump google.golang.org/api from 0.93.0 to 0.94.0 (#9505)
+ * [605b0a0eb](https://github.com/argoproj/argo-workflows/commit/605b0a0eb3413107e2e87d6f3399d6b5f2778727) chore(deps-dev): bump sass from 1.54.5 to 1.54.8 in /ui (#9514)
+ * [6af53eff3](https://github.com/argoproj/argo-workflows/commit/6af53eff34180d9d238ba0fd0cb5a5b9b57b15a5) chore(deps-dev): bump babel-jest from 28.1.3 to 29.0.1 in /ui (#9512)
+ * [a2c20d70e](https://github.com/argoproj/argo-workflows/commit/a2c20d70e8885937532055b8c2791799020057ec) chore(deps): bump react-monaco-editor from 0.49.0 to 0.50.1 in /ui (#9509)
+ * [041d1382d](https://github.com/argoproj/argo-workflows/commit/041d1382d0a22a8bb88e88486f79c6b4bb6dfc8d) chore(deps-dev): bump webpack-dev-server from 4.10.0 to 4.10.1 in /ui (#9510)
+ * [7f9a15e77](https://github.com/argoproj/argo-workflows/commit/7f9a15e77eaa84d7f5474d28e30e52a77ca76b2e) chore(deps-dev): bump @babel/core from 7.18.10 to 7.18.13 in /ui (#9507)
+ * [08963c468](https://github.com/argoproj/argo-workflows/commit/08963c4680353a0b4e0abf16f0590a66b8dd4b3e) chore(deps-dev): bump @types/dagre from 0.7.47 to 0.7.48 in /ui (#9508)
+ * [1b09c8641](https://github.com/argoproj/argo-workflows/commit/1b09c8641ad11680b90dba582b3eae98dcee01c3) chore(deps): bump github.com/coreos/go-oidc/v3 from 3.2.0 to 3.3.0 (#9504)
+ * [4053ddf08](https://github.com/argoproj/argo-workflows/commit/4053ddf081755df8819a4a33ce558c92235ea81d) chore(deps): bump github.com/aliyun/aliyun-oss-go-sdk from 2.2.4+incompatible to 2.2.5+incompatible (#9503)
+ * [06d295752](https://github.com/argoproj/argo-workflows/commit/06d29575210d7b61ca7c7f2fb8e28fdd6c3d5637) feat: log format option for main containers (#9468)
+
+### Contributors
+
+ * Alex Collins
+ * Julie Vogelman
+ * Rohan Kumar
+ * Takao Shibata
+ * Thomas Bonfort
+ * Tianchu Zhao
+ * Yuan Tang
+ * dependabot[bot]
+ * jsvk
+
+## v3.4.0-rc3 (2022-08-31)
+
+ * [b941fbcab](https://github.com/argoproj/argo-workflows/commit/b941fbcaba087d5c5569573d1ef1a027313174ce) feat: improve e2e test for ArtifactGC (#9448)
+ * [94608d1dd](https://github.com/argoproj/argo-workflows/commit/94608d1ddc8781a55563f52ea65476dc99a54f94) feat: added support for artifact GC on GCS (#9420)
+ * [26ab0aed8](https://github.com/argoproj/argo-workflows/commit/26ab0aed8ba19571ffe3a2b048fcb43cbd1986e3) fix: link to "get artifacts from logs" was assuming Node ID was equal to Pod Name (#9464)
+ * [9cce91ea0](https://github.com/argoproj/argo-workflows/commit/9cce91ea0ca748cb35bd653c6f401d1aed97e6e8) Update USERS.md (#9471)
+ * [7118e1224](https://github.com/argoproj/argo-workflows/commit/7118e1224283ecb894794fdd72526089409e1476) feat: support slash in synchronization lock names. Fixes #9394 (#9404)
+ * [ff4109928](https://github.com/argoproj/argo-workflows/commit/ff4109928bd09a1b1d716cbdf82bd3ca132276d1) fix: Descendants of suspended nodes need to be removed when retrying workflow (#9440)
+ * [a09172afa](https://github.com/argoproj/argo-workflows/commit/a09172afafdb98ab362058618b5dc61980f0254e) fix: Incorrect alignment for archived workflow. Fixes #9433 (#9439)
+ * [04d19435c](https://github.com/argoproj/argo-workflows/commit/04d19435cb07e8815f1f95cca6751f8ce6b4bec1) fix: Properly reset suspended and skipped nodes when retrying (#9422)
+ * [de6b5ae6f](https://github.com/argoproj/argo-workflows/commit/de6b5ae6fa39693b7cd7777b9fcff9ff291476dd) fix(executor): Resource template gets incorrect plural for certain types (#9396)
+ * [3ddbb5e00](https://github.com/argoproj/argo-workflows/commit/3ddbb5e009f39fdb31cdaa7d77fca71dc3ae3f0e) fix: Only validate manifests for certain resource actions. Fixes #9418 (#9419)
+ * [a91e0041c](https://github.com/argoproj/argo-workflows/commit/a91e0041c9583deb48751c666dbbef111f3a56f9) fix: Workflow level http template hook status update. Fixes #8529 (#8586)
+ * [343c29819](https://github.com/argoproj/argo-workflows/commit/343c29819ac92d35f5db8a0de432f63df148ea31) fix: Argo waiter: invalid memory address or nil pointer dereference (#9408)
+ * [6f19e50a4](https://github.com/argoproj/argo-workflows/commit/6f19e50a41a17dbf06e6281f005ade6a2f19dba4) fix: Invalid memory address or nil pointer dereference (#9409)
+ * [7d9319b60](https://github.com/argoproj/argo-workflows/commit/7d9319b60d0bc417b25d35968c1619e51c13b7ec) Fix: UI to reflect Template.ArchiveLocation when showing Artifact's bucket in URN (#9351)
+ * [b7904c41c](https://github.com/argoproj/argo-workflows/commit/b7904c41c008176f40bb69c312b38ce6c0f9ce03) chore(deps-dev): bump sass from 1.54.4 to 1.54.5 in /ui (#9402)
+ * [fa66ed8e8](https://github.com/argoproj/argo-workflows/commit/fa66ed8e8bc20c4d759eb923b99dd6641ceafa86) chore(deps): bump github.com/tidwall/gjson from 1.14.2 to 1.14.3 (#9401)
+
+### Contributors
+
+ * Brian Tate
+ * Julie Vogelman
+ * Rohan Kumar
+ * Saravanan Balasubramanian
+ * William Reed
+ * Yuan Tang
+ * dependabot[bot]
+ * jsvk
+
+## v3.4.0-rc2 (2022-08-18)
+
+ * [6e8d1629d](https://github.com/argoproj/argo-workflows/commit/6e8d1629d9eebf78dce07f180ee99a233e422a80) fix: Artifact panel crashes when viewing artifacts. Fixes #9391 (#9392)
+ * [aa23a9ec8](https://github.com/argoproj/argo-workflows/commit/aa23a9ec8b9fc95593fdc41e1632412542a9c050) fix: Exit handle and Lifecycle hook to access {steps/tasks status} (#9229)
+ * [74cdf5d87](https://github.com/argoproj/argo-workflows/commit/74cdf5d870cc4d0b5576e6d78da7a6fde6a1be99) fix: improper selfLinks for cluster-scoped resources. Fixes #9320 (#9375)
+ * [f53d4834a](https://github.com/argoproj/argo-workflows/commit/f53d4834a208f39797637d7fad744caf0540cff8) fix: Panic on nill pointer when running a workflow with restricted parallelism (#9385)
+ * [c756291f7](https://github.com/argoproj/argo-workflows/commit/c756291f701296b36411ccdd639a965a302a5af8) fix: removed error check which prevented deleting successful artGC wfs. (#9383)
+ * [81e3d23e7](https://github.com/argoproj/argo-workflows/commit/81e3d23e730d80f24c90feb283fa3ff3b358e215) chore(deps): bump google.golang.org/api from 0.91.0 to 0.93.0 (#9381)
+ * [62b0db982](https://github.com/argoproj/argo-workflows/commit/62b0db9822ef93732544667739b33c1d9792ccf9) fix(ui): Correctly show icons in DAG. Fixes #9372 & #9373 (#9378)
+ * [47f59c050](https://github.com/argoproj/argo-workflows/commit/47f59c050ed579cdf9e01eddf0f388ac52fe5713) chore(deps): bump cloud.google.com/go/storage from 1.24.0 to 1.25.0 (#9357)
+ * [65670a402](https://github.com/argoproj/argo-workflows/commit/65670a402b1e9a96d246fd2ee363dd27a7f3149b) fix: Fix blank workflow details page after workflow submission (#9377)
+ * [6d08098a8](https://github.com/argoproj/argo-workflows/commit/6d08098a887c701cfffb2ea57f0391d6f7f5d489) feat: add argo delete --force. Fixes #9315. (#9321)
+ * [12466b7c9](https://github.com/argoproj/argo-workflows/commit/12466b7c9138052150afa6e0e81964d91a0538f5) fix: Retry for http timeout error. Fixes #9271 (#9335)
+ * [1b252fd33](https://github.com/argoproj/argo-workflows/commit/1b252fd33c8e456af0f6ed437b4f74a6d8cb46e7) chore(deps-dev): bump sass from 1.54.3 to 1.54.4 in /ui (#9359)
+ * [3f56a74dd](https://github.com/argoproj/argo-workflows/commit/3f56a74dd44e6e28da5bf2fc28cf03bae9b9f5c1) chore(deps-dev): bump webpack-dev-server from 4.9.3 to 4.10.0 in /ui (#9358)
+ * [fd08b0339](https://github.com/argoproj/argo-workflows/commit/fd08b0339506f8f11288393061cf8c2eb155403a) fix: ArtifactGC e2e test was looking for the wrong artifact names (#9353)
+ * [b430180d2](https://github.com/argoproj/argo-workflows/commit/b430180d275adac05d64b82613134b926d4405f1) fix: Deleted pods are not tracked correctly when retrying workflow (#9340)
+ * [e12c697b7](https://github.com/argoproj/argo-workflows/commit/e12c697b7be2547cdffd18c73bf39e10dfa458f0) feat: fix bugs in retryWorkflow if failed pod node has children nodes. Fix #9244 (#9285)
+ * [61f252f1d](https://github.com/argoproj/argo-workflows/commit/61f252f1d2083e5e9f262d0acd72058571e27708) fix: TestWorkflowStepRetry's comment accurately reflects what it does. (#9234)
+
+### Contributors
+
+ * Alex Collins
+ * Dillen Padhiar
+ * Julie Vogelman
+ * Kyle Wong
+ * Robert Kotcher
+ * Saravanan Balasubramanian
+ * Yuan Tang
+ * dependabot[bot]
+ * jingkai
+ * smile-luobin
+
+## v3.4.0-rc1 (2022-08-09)
+
+ * [f481e3b74](https://github.com/argoproj/argo-workflows/commit/f481e3b7444eb9cbb5c4402a27ef209818b1d817) feat: fix workflow hangs during executeDAGTask. Fixes #6557 (#8992)
+ * [ec213c070](https://github.com/argoproj/argo-workflows/commit/ec213c070d92f4ac937f55315feab0fcc108fed5) Fixes #8622: fix http1 keep alive connection leak (#9298)
+ * [0d77f5554](https://github.com/argoproj/argo-workflows/commit/0d77f5554f251771a175a95fc80eeb12489e42b4) fix: Look in correct bucket when downloading artifacts (Template.ArchiveLocation configured) (#9301)
+ * [b356cb503](https://github.com/argoproj/argo-workflows/commit/b356cb503863da43c0cc5e1fe667ebf602cb5354) feat: Artifact GC (#9255)
+ * [e246abec1](https://github.com/argoproj/argo-workflows/commit/e246abec1cbe6be8cb8955f798602faf619a943f) feat: modify "argoexec artifact delete" to handle multiple artifacts. Fixes #9143 (#9291)
+ * [f359625f6](https://github.com/argoproj/argo-workflows/commit/f359625f6262b6fa93b558f4e488a13652e9f50a) chore(deps-dev): bump @babel/preset-env from 7.18.9 to 7.18.10 in /ui (#9311)
+ * [ffefe9402](https://github.com/argoproj/argo-workflows/commit/ffefe9402885a275e7a26c12b5a5e52e7522c4d7) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.32 to 7.0.34 (#9304)
+ * [ee8404bac](https://github.com/argoproj/argo-workflows/commit/ee8404baca5303a6a66f0236aa82464572bded0c) chore(deps-dev): bump @babel/core from 7.18.9 to 7.18.10 in /ui (#9310)
+ * [028851d7f](https://github.com/argoproj/argo-workflows/commit/028851d7f832be5687048fbec20d4d47ef910d26) chore(deps-dev): bump sass from 1.54.0 to 1.54.3 in /ui (#9309)
+ * [c0d26d61c](https://github.com/argoproj/argo-workflows/commit/c0d26d61c02f7fb4140a089139f8984df91eaaf9) chore(deps): bump cron-parser from 4.5.0 to 4.6.0 in /ui (#9307)
+ * [8d06a83bc](https://github.com/argoproj/argo-workflows/commit/8d06a83bccba87886163143e959369f0d0240943) chore(deps): bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0 (#9306)
+ * [f83346959](https://github.com/argoproj/argo-workflows/commit/f83346959cf5204fe80b6b70e4d823bf481579fe) chore(deps): bump google.golang.org/api from 0.90.0 to 0.91.0 (#9305)
+ * [63876713e](https://github.com/argoproj/argo-workflows/commit/63876713e809ceca8e1e540a38b5ad0e650cbb2a) chore(deps): bump github.com/tidwall/gjson from 1.14.1 to 1.14.2 (#9303)
+ * [06b0a8cce](https://github.com/argoproj/argo-workflows/commit/06b0a8cce637db1adae0bae91670e002cfd0ae4d) fix(gcs): Wrap errors using `%w` to make retrying work (#9280)
+ * [083f3a21a](https://github.com/argoproj/argo-workflows/commit/083f3a21a601e086ca48d2532463a858cc8b316b) fix: pass correct error obj for azure blob failures (#9276)
+ * [55d15aeb0](https://github.com/argoproj/argo-workflows/commit/55d15aeb03847771e2b48f11fa84f88ad1df3e7c) feat: support zip for output artifacts archive. Fixes #8861 (#8973)
+ * [a51e833d9](https://github.com/argoproj/argo-workflows/commit/a51e833d9eea18ce5ef7606e55ddd025efa85de1) chore(deps): bump google.golang.org/api from 0.89.0 to 0.90.0 (#9260)
+ * [c484c57f1](https://github.com/argoproj/argo-workflows/commit/c484c57f13f6316bbf5ac7e98c1216ba915923c7) chore(deps-dev): bump @fortawesome/fontawesome-free from 6.1.1 to 6.1.2 in /ui (#9261)
+ * [2d1758fe9](https://github.com/argoproj/argo-workflows/commit/2d1758fe90fd60b37d0dfccb55c3f79d8a897289) fix: retryStrategy.Limit is now read properly for backoff strategy. Fixes #9170. (#9213)
+ * [b565bf358](https://github.com/argoproj/argo-workflows/commit/b565bf35897f529bbb446058c24b72d506024e29) Fix: user namespace override (Fixes #9266) (#9267)
+ * [0c24ca1ba](https://github.com/argoproj/argo-workflows/commit/0c24ca1ba8a5c38c846d595770e16398f6bd84a5) fix: TestParallel 503 with external url (#9265)
+ * [fd6c7a7ec](https://github.com/argoproj/argo-workflows/commit/fd6c7a7ec1f2053f9fdd03451d7d29b1339c0408) feat: Add custom event aggregator function with annotations (#9247)
+ * [be6ba4f77](https://github.com/argoproj/argo-workflows/commit/be6ba4f772f65588af7c79cc9351ff6dea63ed16) fix: add ServiceUnavailable to s3 transient errors list Fixes #9248 (#9249)
+ * [51538235c](https://github.com/argoproj/argo-workflows/commit/51538235c7a70b89855dd3b96d97387472bdbade) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.31 to 7.0.32 (#9253)
+ * [5cf5150ef](https://github.com/argoproj/argo-workflows/commit/5cf5150efe1694bb165e98c1d7509f9987d4f524) chore(deps): bump cloud.google.com/go/storage from 1.22.1 to 1.24.0 (#9252)
+ * [454f19ac8](https://github.com/argoproj/argo-workflows/commit/454f19ac8959f3e0db87bb34ec8f7099558aa737) chore(deps): bump google.golang.org/api from 0.87.0 to 0.89.0 (#9251)
+ * [e19d73f64](https://github.com/argoproj/argo-workflows/commit/e19d73f64af073bdd7778674c72a1d197c0836f6) chore(deps-dev): bump @babel/core from 7.18.6 to 7.18.9 in /ui (#9218)
+ * [073431310](https://github.com/argoproj/argo-workflows/commit/07343131080ab125da7ed7d33dbf2d7e0e21362a) chore(deps-dev): bump sass from 1.53.0 to 1.54.0 in /ui (#9219)
+ * [aa6aaf753](https://github.com/argoproj/argo-workflows/commit/aa6aaf7539ed86f08c43d4a59eb42337aea86ce6) chore(deps-dev): bump @babel/preset-env from 7.18.6 to 7.18.9 in /ui (#9216)
+ * [6f8592228](https://github.com/argoproj/argo-workflows/commit/6f8592228668457a8b1db072cc53db2c5b01de55) chore(deps): bump github.com/sirupsen/logrus from 1.8.1 to 1.9.0 (#9214)
+ * [769896eb5](https://github.com/argoproj/argo-workflows/commit/769896eb5bf0a7d8db1a94b423e5bc16cf09d5aa) feat: APIratelimit headers and doc (#9206)
+ * [bcb596270](https://github.com/argoproj/argo-workflows/commit/bcb59627072c3b4f0cd1cef12f499ec3d8e87815) ui: remove workflowlist searchbox (#9208)
+ * [15fdf4903](https://github.com/argoproj/argo-workflows/commit/15fdf4903a05c7854656f59f61a676362fe551c6) fix: line return in devcontainer host file (#9204)
+ * [44731d671](https://github.com/argoproj/argo-workflows/commit/44731d671d425b0709bab5c5e27ed7c42a0ee92d) feat: adding new CRD type "ArtifactGCTask" (#9184)
+ * [d5d4628a3](https://github.com/argoproj/argo-workflows/commit/d5d4628a3573a0e1a75c367243e259844320e021) fix: Set namespace to user namespace obtained from /userinfo service (#9191)
+ * [e4489f5d1](https://github.com/argoproj/argo-workflows/commit/e4489f5d12c4f62421c87c69d8b997aad71fdea6) feat: log format option for wait and init containers. Fixes #8986 (#9169)
+ * [573fe98ff](https://github.com/argoproj/argo-workflows/commit/573fe98ffaa119b607bb5d4aafc1fb3c70a4c564) fix: remove unused argument which is triggering in lint (needed for PRs to pass CI) (#9186)
+ * [1af892133](https://github.com/argoproj/argo-workflows/commit/1af892133cd5b9e6ac22fc61bd4eabd84c568e89) feat: api ratelimiter for argoserver (#8993)
+ * [0f1d1d9b7](https://github.com/argoproj/argo-workflows/commit/0f1d1d9b7ef9b602b82123a9d92c212b50ac01e1) fix: support RemainingItemCount in archivedWrokflow (#9118)
+ * [aea581e02](https://github.com/argoproj/argo-workflows/commit/aea581e027fcd0675e785f413e964c588af304ad) fix: Incorrect link to workflows list with the same author (#9173)
+ * [fd6f3c263](https://github.com/argoproj/argo-workflows/commit/fd6f3c263412a1174de723470a14721b220c4651) feat: Add support for Azure Blob Storage artifacts Fixes #1540 (#9026)
+ * [26ff2e8a1](https://github.com/argoproj/argo-workflows/commit/26ff2e8a17ff68628090e18a3f246ab87fe950a3) chore(deps): bump google.golang.org/api from 0.86.0 to 0.87.0 (#9157)
+ * [877f36f37](https://github.com/argoproj/argo-workflows/commit/877f36f370d7ef00a1b8f136bb157e64c1e2769a) fix: Workflow details accessing undefined templateRef. Fixes #9167 (#9168)
+ * [6c20202ca](https://github.com/argoproj/argo-workflows/commit/6c20202cae8e62bb6c04a067a269e964d181e864) feat: make node info side panel resizable. Fixes #8917 (#8963)
+ * [19db1d35e](https://github.com/argoproj/argo-workflows/commit/19db1d35e3f1be55ca8e7ddc5040b9eaf4ac3f4b) chore(deps-dev): bump babel-jest from 28.1.2 to 28.1.3 in /ui (#9159)
+ * [96b98dafb](https://github.com/argoproj/argo-workflows/commit/96b98dafbdde5770d4d92c469e13ca81734a753f) chore(deps): bump github.com/prometheus/common from 0.35.0 to 0.37.0 (#9158)
+ * [4dc0e83ea](https://github.com/argoproj/argo-workflows/commit/4dc0e83ea091990e2a02dd8a2b542035ebe98d9a) chore(deps-dev): bump webpack-dev-server from 4.9.2 to 4.9.3 in /ui (#9105)
+ * [cbe17105d](https://github.com/argoproj/argo-workflows/commit/cbe17105d91517f37cafafb49ad5f422b895c239) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.30 to 7.0.31 (#9130)
+ * [a9c36e723](https://github.com/argoproj/argo-workflows/commit/a9c36e723c0ab44baf3ea0cdf4706fc4b8bf848a) chore(deps-dev): bump @types/swagger-ui-react from 3.23.2 to 4.11.0 in /ui (#9132)
+ * [9bbf7e0f0](https://github.com/argoproj/argo-workflows/commit/9bbf7e0f092f0d76c7419d291d3f9dba016b2f3c) feat: Support overriding parameters when retry/resubmit workflows (#9141)
+ * [42729ff75](https://github.com/argoproj/argo-workflows/commit/42729ff7542760bd27b08a7347a603d8f232466e) fix: Workflow retry should also reset the selected nodes (#9156)
+ * [559b59c0a](https://github.com/argoproj/argo-workflows/commit/559b59c0a2b9b3254740edf634de8a1c63c84ab0) feat: report Artifact GC failures in user interface. Fixes #8518 (#9115)
+ * [56d0c664a](https://github.com/argoproj/argo-workflows/commit/56d0c664ad96c95ca6c2311b2d1559dd423a5e4d) fix: Do not error when getting log artifacts from GCS. Fixes #8746 (#9155)
+ * [2b92b1aef](https://github.com/argoproj/argo-workflows/commit/2b92b1aefbf1e6a12476b946f05559c9b05fffef) fix: Fixed swagger error. Fixes #8922 (#9078)
+ * [57bac335a](https://github.com/argoproj/argo-workflows/commit/57bac335afac2c28a4eb5ccf1fa97bb5bba63e97) feat: refactoring e2e test timeouts to support multiple environments. (#8925)
+ * [921ae1ebf](https://github.com/argoproj/argo-workflows/commit/921ae1ebf5f849d4f684c79dee375205f05cfca9) chore(deps): bump moment from 2.29.3 to 2.29.4 in /ui (#9131)
+ * [c149dc53c](https://github.com/argoproj/argo-workflows/commit/c149dc53c78571778b0589d977dd0445e75d9eec) chore(deps): bump github.com/stretchr/testify from 1.7.5 to 1.8.0 (#9097)
+ * [a0c9e66c1](https://github.com/argoproj/argo-workflows/commit/a0c9e66c1d1cb3d83c5150814c4b8ccd9acdcfb1) chore(deps): bump react-monaco-editor from 0.48.0 to 0.49.0 in /ui (#9104)
+ * [0f0e25e03](https://github.com/argoproj/argo-workflows/commit/0f0e25e03ffe00f79e74087044ecd080f2d6242a) [Snyk] Upgrade swagger-ui-react from 4.10.3 to 4.12.0 (#9072)
+ * [8fc78ca9d](https://github.com/argoproj/argo-workflows/commit/8fc78ca9dce321f2173fba7735e4b4bd48df1b6c) chore(deps): bump cronstrue from 1.125.0 to 2.11.0 in /ui (#9102)
+ * [01e9ef78f](https://github.com/argoproj/argo-workflows/commit/01e9ef78f9cd81d3e0ea4c85e33abd181118868c) chore(deps-dev): bump @babel/core from 7.18.5 to 7.18.6 in /ui (#9100)
+ * [50a4d0044](https://github.com/argoproj/argo-workflows/commit/50a4d00443cfc53976db6227394784bbf34fe239) feat: Support retry on nested DAG and node groups (#9028)
+ * [20f8582a9](https://github.com/argoproj/argo-workflows/commit/20f8582a9e71effee220b160b229b5fd68bf7c95) feat(ui): Add workflow author information to workflow summary and drawer (#9119)
+ * [18be9593e](https://github.com/argoproj/argo-workflows/commit/18be9593e76bdeb456b5de5ea047a6aa8d201d74) chore(deps-dev): bump babel-jest from 28.1.1 to 28.1.2 in /ui (#9103)
+ * [154d849b3](https://github.com/argoproj/argo-workflows/commit/154d849b32082a4211487b6dbebbae215b97b9ee) chore(deps): bump cron-parser from 4.4.0 to 4.5.0 in /ui (#9101)
+ * [801216c44](https://github.com/argoproj/argo-workflows/commit/801216c44053343020f41a9953a5ed1722b36232) chore(deps-dev): bump @babel/preset-env from 7.18.2 to 7.18.6 in /ui (#9099)
+ * [ba225d3aa](https://github.com/argoproj/argo-workflows/commit/ba225d3aa586dd9e6770ec1b2f482f1c15fe2add) chore(deps): bump google.golang.org/api from 0.85.0 to 0.86.0 (#9096)
+ * [ace228486](https://github.com/argoproj/argo-workflows/commit/ace2284869a9574602b602a5bdf4592cd6ae8376) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.29 to 7.0.30 (#9098)
+ * [3967929cf](https://github.com/argoproj/argo-workflows/commit/3967929cfde54c2a3c62c47fd509beaea1832ea4) chore(deps): bump dependabot/fetch-metadata from 1.3.1 to 1.3.3 (#9095)
+ * [f69cb89b1](https://github.com/argoproj/argo-workflows/commit/f69cb89b16bce0b88b63ec3fec14d7abc0b32fef) docs(workflow/artifacts/gcs): correct spelling of BUCKET (#9082)
+ * [61211f9db](https://github.com/argoproj/argo-workflows/commit/61211f9db1568190dd46b7469fa79eb6530bba73) fix: Add workflow failures before hooks run. Fixes #8882 (#9009)
+ * [c1154ff97](https://github.com/argoproj/argo-workflows/commit/c1154ff975bcb580554f78f393fd908b1f64ea6a) feat: redirect to archive on workflow absence. Fixes #7745 (#7854)
+ * [f5f1a3438](https://github.com/argoproj/argo-workflows/commit/f5f1a34384ab4bbbebd9863711a3047a08ced7fb) fix: sync lock should be released only if we're retrying (#9063)
+ * [146e38a3f](https://github.com/argoproj/argo-workflows/commit/146e38a3f91ac8a7b9b749d96c54bd3eab2ce1ab) chore!: Remove dataflow pipelines from codebase (#9071)
+ * [92eaadffc](https://github.com/argoproj/argo-workflows/commit/92eaadffcd0c244f05b23d4f177fd53f000b1a99) feat: inform users on UI if an artifact will be deleted. Fixes #8667 (#9056)
+ * [d0cfc6d10](https://github.com/argoproj/argo-workflows/commit/d0cfc6d10b11d9977007bb14373e699e604c1b74) feat: UI default to the namespace associated with ServiceAccount. Fixes #8533 (#9008)
+ * [1ccc120cd](https://github.com/argoproj/argo-workflows/commit/1ccc120cd5392f877ecbb328cbf5304e6eb89783) feat: added support for binary HTTP template bodies. Fixes #6888 (#8087)
+ * [443155dea](https://github.com/argoproj/argo-workflows/commit/443155deaa1aa9e19688de0580840bd0f8598dd5) feat: If artifact has been deleted, show a message to that effect in the iFrame in the UI (#8966)
+ * [cead295fe](https://github.com/argoproj/argo-workflows/commit/cead295fe8b4cdfbc7eeb3c2dcfa99e2bfb291b6) chore(deps-dev): bump @types/superagent from 3.8.3 to 4.1.15 in /ui (#9057)
+ * [b1e49a471](https://github.com/argoproj/argo-workflows/commit/b1e49a471c7de65a628ac496a4041a2ec9975eb0) chore(deps-dev): bump html-webpack-plugin from 3.2.0 to 4.5.2 in /ui (#9036)
+ * [11801d044](https://github.com/argoproj/argo-workflows/commit/11801d044cfddfc8100d973e91ddfe9a1252a028) chore(deps): bump superagent from 7.1.6 to 8.0.0 in /ui (#9052)
+ * [c30493d72](https://github.com/argoproj/argo-workflows/commit/c30493d722c2fd9aa5ccc528327759d96f99fb23) chore(deps): bump github.com/prometheus/common from 0.34.0 to 0.35.0 (#9049)
+ * [74c1e86b8](https://github.com/argoproj/argo-workflows/commit/74c1e86b8bc302780f36a364d7adb98184bf6e45) chore(deps): bump google.golang.org/api from 0.83.0 to 0.85.0 (#9044)
+ * [77be291da](https://github.com/argoproj/argo-workflows/commit/77be291da21c5057d0c966adce449a7f9177e0db) chore(deps): bump github.com/stretchr/testify from 1.7.2 to 1.7.5 (#9045)
+ * [278f61c46](https://github.com/argoproj/argo-workflows/commit/278f61c46309b9df07ad23497a4fd97817af93cc) chore(deps): bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#9047)
+ * [e288dfc89](https://github.com/argoproj/argo-workflows/commit/e288dfc8963fdd5e5bff8d7cbed5d227e76afd7b) Revert "chore(deps-dev): bump raw-loader from 0.5.1 to 4.0.2 in /ui (#9034)" (#9041)
+ * [b9318ba93](https://github.com/argoproj/argo-workflows/commit/b9318ba939defe5fdeb46dcbfc44bc8f7cf14a6d) chore(deps-dev): bump webpack-cli from 4.9.2 to 4.10.0 in /ui (#9037)
+ * [891a256a2](https://github.com/argoproj/argo-workflows/commit/891a256a2165a853bc18e5f068d870a232b671f3) chore(deps-dev): bump sass from 1.52.1 to 1.53.0 in /ui (#9038)
+ * [db73db04d](https://github.com/argoproj/argo-workflows/commit/db73db04d033cc5a4e2f113fd090afe773ebcb81) chore(deps-dev): bump @babel/core from 7.18.2 to 7.18.5 in /ui (#9031)
+ * [fa93a6558](https://github.com/argoproj/argo-workflows/commit/fa93a655834138fc549f67f8a4eadd8df7a18c50) chore(deps-dev): bump babel-jest from 28.1.0 to 28.1.1 in /ui (#9035)
+ * [aeed837be](https://github.com/argoproj/argo-workflows/commit/aeed837be8083b8f49242635f3baa1b162a8db8b) chore(deps-dev): bump webpack-dev-server from 4.9.0 to 4.9.2 in /ui (#9032)
+ * [e7d3308ef](https://github.com/argoproj/argo-workflows/commit/e7d3308ef4f755d484c8ca6cf90993a5e1d7f954) chore(deps-dev): bump raw-loader from 0.5.1 to 4.0.2 in /ui (#9034)
+ * [d90f11c3e](https://github.com/argoproj/argo-workflows/commit/d90f11c3e4c1f7d88be3220f57c3184d7beaddaf) [Snyk] Upgrade superagent from 7.1.3 to 7.1.4 (#9020)
+ * [6e962fdca](https://github.com/argoproj/argo-workflows/commit/6e962fdcab5effbb4ac12180249019d7d6241b8c) feat: sanitize config links (#8779)
+ * [89f3433bf](https://github.com/argoproj/argo-workflows/commit/89f3433bf7cbca7092952aa8ffc5e5c254f28999) fix: workflow.status is now set properly in metrics. Fixes #8895 (#8939)
+ * [2aa32aea5](https://github.com/argoproj/argo-workflows/commit/2aa32aea5eaf325bc6a3eff852f2ff0052366bf6) fix: check for nil, and add logging to expose root cause of panic in Issue 8968 (#9010)
+ * [62287487a](https://github.com/argoproj/argo-workflows/commit/62287487a0895a457804f0ac97fdf9c9413dd2ab) fix: Treat 'connection reset by peer' as a transient network error. Fixes #9013 (#9017)
+ * [2e3177617](https://github.com/argoproj/argo-workflows/commit/2e31776175b2cbb123278920e30807244e2f7a3b) fix: add nil check for retryStrategy.Limit in deadline check. Fixes #8990 (#8991)
+ * [73487fbee](https://github.com/argoproj/argo-workflows/commit/73487fbeeb645ac8f6229f98aed2ec6eec756571) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.27 to 7.0.29 (#9004)
+ * [e34e378af](https://github.com/argoproj/argo-workflows/commit/e34e378af05b0ffde14b89e8d9eec9964a903002) chore(deps): bump github.com/argoproj/pkg from 0.13.2 to 0.13.3 (#9002)
+ * [89f82cea4](https://github.com/argoproj/argo-workflows/commit/89f82cea4b3f3f40d1666d2469ab3a97e3665fdd) feat: log workflow size before hydrating/dehydrating. Fixes #8976 (#8988)
+ * [a1535fa44](https://github.com/argoproj/argo-workflows/commit/a1535fa446d15bae56656d20577fdbb000353cc2) fix: Workflow Duration metric shouldn't increase after workflow complete (#8989)
+ * [6106ac722](https://github.com/argoproj/argo-workflows/commit/6106ac7229eeaac9132f8df595b569de2bc68ccf) feat: Support loading manifest from artifacts for resource templates. Fixes #5878 (#8657)
+ * [e0a1afa91](https://github.com/argoproj/argo-workflows/commit/e0a1afa91d8e51ba2c6aed6c604f2a69bdb1b387) fix: sync cluster Workflow Template Informer before it's used (#8961)
+ * [1ed1ee114](https://github.com/argoproj/argo-workflows/commit/1ed1ee114b2069d9cdeb9fd1f3a7513f9f13a396) chore(deps): bump actions/setup-python from 3 to 4 (#8949)
+ * [6c244f3cb](https://github.com/argoproj/argo-workflows/commit/6c244f3cb400f69b641d7e59c5215806a2870604) fix: long code blocks overflow in ui. Fixes #8916 (#8947)
+ * [e31ffcd33](https://github.com/argoproj/argo-workflows/commit/e31ffcd339370d6000f86d552845d7d378620d29) fix: Correct kill command. Fixes #8687 (#8908)
+ * [263977967](https://github.com/argoproj/argo-workflows/commit/263977967a47f24711b9f6110fe950c47d8c5f08) chore(deps): bump google.golang.org/api from 0.82.0 to 0.83.0 (#8951)
+ * [e96b1b3fd](https://github.com/argoproj/argo-workflows/commit/e96b1b3fd4e27608de8a94763782bd2d41cd5761) chore(deps): bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#8950)
+ * [107ed932d](https://github.com/argoproj/argo-workflows/commit/107ed932de466a89feb71dc04950c86d98747cc5) feat: add indexes to improve archived workflow performance. Fixes #8836 (#8860)
+ * [1d4edb433](https://github.com/argoproj/argo-workflows/commit/1d4edb4333ce4e5efeb44a199b390c3d9d02fc25) feat: Date range filter for workflow list. Fixes #8329 (#8596)
+ * [a6eef41bf](https://github.com/argoproj/argo-workflows/commit/a6eef41bf961cda347b9a9bd8476fc33e3a467a9) feat: add artifact delete to argoexec CLI. Fixes #8669 (#8913)
+ * [416fce705](https://github.com/argoproj/argo-workflows/commit/416fce70543059cc81753ba5131b1661a13a0fed) fix: Fork sub-process. Fixes #8454 (#8906)
+ * [750c4e1f6](https://github.com/argoproj/argo-workflows/commit/750c4e1f699b770a309843f2189b4e703305e44f) fix: Only signal running containers, ignore failures. (#8909)
+ * [ede1a39e7](https://github.com/argoproj/argo-workflows/commit/ede1a39e7cb48890aa5d4c8221e2c9d94e7ef007) fix: workflowMetadata needs to be loaded into globalParams in both ArgoServer and Controller (#8907)
+ * [df3764925](https://github.com/argoproj/argo-workflows/commit/df37649251f5791c40802defd923dd735924eb3a) Add left-margin to the question circle next to parameter name in Submit Workflow Panel (#8927)
+ * [1e17f7ff5](https://github.com/argoproj/argo-workflows/commit/1e17f7ff5232067c9c1c05bfa55322e41e0915d7) chore(deps): bump google.golang.org/api from 0.81.0 to 0.82.0 (#8914)
+ * [7dacb5bca](https://github.com/argoproj/argo-workflows/commit/7dacb5bcaeae8e3be64bb1fbf54024401d42d867) fix: Fixed Swagger error. Fixes #8830 (#8886)
+ * [8592e9ce6](https://github.com/argoproj/argo-workflows/commit/8592e9ce6e4de64e55c23bfda460b0cad67e74f7) feat: enable gcflags (compiler flags) to be passed into 'go build' (#8896)
+ * [7a626aa6a](https://github.com/argoproj/argo-workflows/commit/7a626aa6a1368da59c322f1d768e691b0ee4d7e4) feat: add Artifact.Deleted (#8893)
+ * [f2c748ac4](https://github.com/argoproj/argo-workflows/commit/f2c748ac44ed41b1d672e6c45a34090992b979d7) feat: Artifact GC Finalizer needs to be added if any Output Artifacts have a strategy (#8856)
+ * [093a6fe7e](https://github.com/argoproj/argo-workflows/commit/093a6fe7e1b1926f5feaff07a66edb9ff036f866) Add Orchest to ecosystem (#8884)
+ * [2b5ae622b](https://github.com/argoproj/argo-workflows/commit/2b5ae622bc257a4dafb4fab961e8142accaa484d) Removed Security Nudge and all its invocations (#8838)
+ * [86ab55726](https://github.com/argoproj/argo-workflows/commit/86ab55726e213bc406e69edb14921b501938fa25) chore(deps-dev): bump monaco-editor-webpack-plugin from 1.9.0 to 1.9.1 in /ui (#8877)
+ * [df750d715](https://github.com/argoproj/argo-workflows/commit/df750d7158f7291983aeffe709b7624eb73f964a) chore(deps-dev): bump @babel/preset-env from 7.18.0 to 7.18.2 in /ui (#8876)
+ * [f0447918d](https://github.com/argoproj/argo-workflows/commit/f0447918d6826b21a8e0cf0d0d218113e69059a8) chore(deps): bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#8874)
+ * [8b7bdb713](https://github.com/argoproj/argo-workflows/commit/8b7bdb7139e8aa152e95ad3fe6815e7a801afcbb) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.26 to 7.0.27 (#8875)
+ * [282a72295](https://github.com/argoproj/argo-workflows/commit/282a722950b113008b4efb258309cc4066f925a0) add pismo.io to argo users (#8871)
+ * [1a517e6f5](https://github.com/argoproj/argo-workflows/commit/1a517e6f5b801feae9416acf824c83ff65dea65c) chore(deps): bump superagent from 3.8.3 to 7.1.3 in /ui (#8851)
+ * [53012fe66](https://github.com/argoproj/argo-workflows/commit/53012fe66fb6afcefcf4b237c34264a600ae6804) chore(deps-dev): bump source-map-loader from 0.2.4 to 1.1.3 in /ui (#8850)
+ * [35eb2bb96](https://github.com/argoproj/argo-workflows/commit/35eb2bb96d1489366e9813c14863a79db4ea85df) chore(deps-dev): bump file-loader from 6.0.0 to 6.2.0 in /ui (#8848)
+ * [116dfdb03](https://github.com/argoproj/argo-workflows/commit/116dfdb039611d70dd98aef7eb4428b589d55361) chore(deps-dev): bump @fortawesome/fontawesome-free from 5.15.3 to 6.1.1 in /ui (#8846)
+ * [7af70ff39](https://github.com/argoproj/argo-workflows/commit/7af70ff3926e0400d2fe5260f0ea2eeb8bc9bf53) chore(deps-dev): bump glob from 7.1.6 to 8.0.3 in /ui (#8845)
+ * [67dab5d85](https://github.com/argoproj/argo-workflows/commit/67dab5d854a4b1be693571765eae3857559851c6) chore(deps): bump cron-parser from 2.18.0 to 4.4.0 in /ui (#8844)
+ * [e7d294214](https://github.com/argoproj/argo-workflows/commit/e7d2942148ed876717b24fcd2b8af7735e977cb0) chore(deps-dev): bump @babel/core from 7.12.10 to 7.18.2 in /ui (#8843)
+ * [f676ac59a](https://github.com/argoproj/argo-workflows/commit/f676ac59a0794791dc5bdfd74acd9764110f2d2a) chore(deps): bump google.golang.org/api from 0.80.0 to 0.81.0 (#8841)
+ * [d324faaf8](https://github.com/argoproj/argo-workflows/commit/d324faaf885d32e8666a70e1f20bae7e71db386e) chore(deps): bump github.com/aliyun/aliyun-oss-go-sdk from 2.2.2+incompatible to 2.2.4+incompatible (#8842)
+ * [40ab51766](https://github.com/argoproj/argo-workflows/commit/40ab51766aa7cb511dcc3533aeb917379e6037ad) Revert "chore(deps-dev): bump style-loader from 0.20.3 to 2.0.0 in /ui" (#8839)
+ * [cc9d14cf0](https://github.com/argoproj/argo-workflows/commit/cc9d14cf0d60812e177ebb447181df933199b722) feat: Use Pod Names v2 by default (#8748)
+ * [c0490ec04](https://github.com/argoproj/argo-workflows/commit/c0490ec04be88975c316ff6a9dc007861c8f9254) chore(deps-dev): bump webpack-cli from 3.3.11 to 4.9.2 in /ui (#8726)
+ * [bc4a80a8d](https://github.com/argoproj/argo-workflows/commit/bc4a80a8d63f869a7a607861374e0c206873f250) feat: remove size limit of 128kb for workflow templates. Fixes #8789 (#8796)
+ * [5c91d93af](https://github.com/argoproj/argo-workflows/commit/5c91d93afd07f207769a63730ec72e9a93b584ce) chore(deps-dev): bump @babel/preset-env from 7.12.11 to 7.18.0 in /ui (#8825)
+ * [d61bea949](https://github.com/argoproj/argo-workflows/commit/d61bea94947526e7ca886891152c565cc15abded) chore(deps): bump js-yaml and @types/js-yaml in /ui (#8823)
+ * [4688afcc5](https://github.com/argoproj/argo-workflows/commit/4688afcc51c50edc27eaba92c449bc4bce00a139) chore(deps-dev): bump webpack-dev-server from 3.11.3 to 4.9.0 in /ui (#8818)
+ * [14ac0392c](https://github.com/argoproj/argo-workflows/commit/14ac0392ce79bddbb9fc44c86fcf315ea1746235) chore(deps): bump cloud.google.com/go/storage from 1.22.0 to 1.22.1 (#8816)
+ * [3a21fb8a4](https://github.com/argoproj/argo-workflows/commit/3a21fb8a423047268a50fba22dcdd2b4d4029944) chore(deps-dev): bump tslint from 5.11.0 to 5.20.1 in /ui (#8822)
+ * [eca4bdc49](https://github.com/argoproj/argo-workflows/commit/eca4bdc493332eeaf626f454fb25f1ec5257864a) chore(deps-dev): bump copyfiles from 1.2.0 to 2.4.1 in /ui (#8821)
+ * [3416253be](https://github.com/argoproj/argo-workflows/commit/3416253be1047d5c6e6c0cb69defd92ee7eea5fe) chore(deps-dev): bump style-loader from 0.20.3 to 2.0.0 in /ui (#8820)
+ * [e9ea8ee69](https://github.com/argoproj/argo-workflows/commit/e9ea8ee698d8b0d173d0039eba66b2a017d650d3) chore(deps-dev): bump sass from 1.30.0 to 1.52.1 in /ui (#8817)
+ * [ac92a49d0](https://github.com/argoproj/argo-workflows/commit/ac92a49d0f253111bd14bd72699ca3ad8cbeee1d) chore(deps): bump google.golang.org/api from 0.79.0 to 0.80.0 (#8815)
+ * [1bd841853](https://github.com/argoproj/argo-workflows/commit/1bd841853633ebb71fc569b2975def90afb1a68d) docs(running-locally): update dependencies info (#8810)
+ * [bc0100346](https://github.com/argoproj/argo-workflows/commit/bc01003468186ddcb93d1d32e9a49a75046827e7) fix: Change to distroless. Fixes #8805 (#8806)
+ * [872826591](https://github.com/argoproj/argo-workflows/commit/8728265915fd7c18f05f32e32dc12de1ef3ca46b) Revert "chore(deps-dev): bump style-loader from 0.20.3 to 2.0.0 in /u… (#8804)
+ * [fbb8246cd](https://github.com/argoproj/argo-workflows/commit/fbb8246cdc44d218f70f0de677be0f4dfd0780cf) fix: set NODE_OPTIONS to no-experimental-fetch to prevent yarn start error (#8802)
+ * [39fbdb2a5](https://github.com/argoproj/argo-workflows/commit/39fbdb2a551482c5ae2860fd266695c0113cb7b7) fix: fix a command in the quick-start page (#8782)
+ * [961f731b7](https://github.com/argoproj/argo-workflows/commit/961f731b7e9cb60490dd763a394893154c0b3c60) fix: Omitted task result should also be valid (#8776)
+ * [67cdd5f97](https://github.com/argoproj/argo-workflows/commit/67cdd5f97a16041fd1ec32134158c71c07249e4d) chore(deps-dev): bump babel-loader from 8.2.2 to 8.2.5 in /ui (#8767)
+ * [fce407663](https://github.com/argoproj/argo-workflows/commit/fce40766351440375e6b2761cd6a304474764b9a) chore(deps-dev): bump babel-jest from 26.6.3 to 28.1.0 in /ui (#8774)
+ * [026298671](https://github.com/argoproj/argo-workflows/commit/02629867180367fb21a347c3a36cf2d52b63a2c3) chore(deps-dev): bump style-loader from 0.20.3 to 2.0.0 in /ui (#8775)
+ * [2e1fd11db](https://github.com/argoproj/argo-workflows/commit/2e1fd11db5bbb95ee9bcdbeaeab970fa92fc3588) chore(deps-dev): bump webpack from 4.35.0 to 4.46.0 in /ui (#8768)
+ * [00bda0b06](https://github.com/argoproj/argo-workflows/commit/00bda0b0690ea24fa52603f30eecb40fe8b5cdd7) chore(deps-dev): bump @types/prop-types from 15.5.4 to 15.7.5 in /ui (#8773)
+ * [28b494a67](https://github.com/argoproj/argo-workflows/commit/28b494a674e560a07e5a1c98576a94bbef111fc5) chore(deps-dev): bump @types/dagre from 0.7.44 to 0.7.47 in /ui (#8772)
+ * [b07a57694](https://github.com/argoproj/argo-workflows/commit/b07a576945e87915e529d718101319d2f83cd98a) chore(deps): bump react-monaco-editor from 0.47.0 to 0.48.0 in /ui (#8770)
+ * [2a0ac29d2](https://github.com/argoproj/argo-workflows/commit/2a0ac29d27466a247c3a4fee0429d95aa5b67338) chore(deps-dev): bump webpack-dev-server from 3.7.2 to 3.11.3 in /ui (#8769)
+ * [6b11707f5](https://github.com/argoproj/argo-workflows/commit/6b11707f50301a125eb8349193dd0be8659a4cdf) chore(deps): bump github.com/coreos/go-oidc/v3 from 3.1.0 to 3.2.0 (#8765)
+ * [d23693166](https://github.com/argoproj/argo-workflows/commit/d236931667a60266f87fbc446064ceebaf582996) chore(deps): bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 (#8763)
+ * [f6d84640f](https://github.com/argoproj/argo-workflows/commit/f6d84640fda435e08cc6a961763669b7572d0e69) fix: Skip TestExitHookWithExpression() completely (#8761)
+ * [178bbbc31](https://github.com/argoproj/argo-workflows/commit/178bbbc31c594f9ded4b8a66b0beecbb16cfa949) fix: Temporarily fix CI build. Fixes #8757. (#8758)
+ * [6b9dc2674](https://github.com/argoproj/argo-workflows/commit/6b9dc2674f2092b2198efb0979e5d7e42efffc30) feat: Add WebHDFS support for HTTP artifacts. Fixes #7540 (#8468)
+ * [354dee866](https://github.com/argoproj/argo-workflows/commit/354dee86616014bcb77afd170685242a18efd07c) fix: Exit lifecycle hook should respect expression. Fixes #8742 (#8744)
+ * [aa366db34](https://github.com/argoproj/argo-workflows/commit/aa366db345d794f0d330336d51eb2a88f14ebbe6) fix: remove list and watch on secrets. Fixes #8534 (#8555)
+ * [342abcd6d](https://github.com/argoproj/argo-workflows/commit/342abcd6d72b4cda64b01f30fa406b2f7b86ac6d) fix: mkdocs uses 4space indent for nested list (#8740)
+ * [567436640](https://github.com/argoproj/argo-workflows/commit/5674366404a09cee5f4e36e338a4292b057fe1b9) chore(deps-dev): bump typescript from 3.9.2 to 4.6.4 in /ui (#8719)
+ * [1f2417e30](https://github.com/argoproj/argo-workflows/commit/1f2417e30937399e96fd4dfcd3fcc2ed7333291a) feat: running locally through dev container (#8677)
+ * [515e0763a](https://github.com/argoproj/argo-workflows/commit/515e0763ad4b1bd9d2941fc5c141c52691fc3b12) fix: Simplify return logic in executeTmplLifeCycleHook (#8736)
+ * [b8f511309](https://github.com/argoproj/argo-workflows/commit/b8f511309adf6443445e6dbf55889538fd39eacc) fix: Template in Lifecycle hook should be optional (#8735)
+ * [98a97d6d9](https://github.com/argoproj/argo-workflows/commit/98a97d6d91c0d9d83430da20e11cea39a0a7919b) chore(deps-dev): bump ts-node from 4.1.0 to 9.1.1 in /ui (#8722)
+ * [e4d35f0ad](https://github.com/argoproj/argo-workflows/commit/e4d35f0ad3665d7d732a16b9e369f8658049bacd) chore(deps-dev): bump react-hot-loader from 3.1.3 to 4.13.0 in /ui (#8723)
+ * [b9ec444fc](https://github.com/argoproj/argo-workflows/commit/b9ec444fc4cf60ed876823b25a41f74a28698f0b) chore(deps-dev): bump copy-webpack-plugin from 4.5.2 to 5.1.2 in /ui (#8718)
+ * [43fb7106a](https://github.com/argoproj/argo-workflows/commit/43fb7106a83634b85a3b934e22a05246e76f7d15) chore(deps-dev): bump tslint-plugin-prettier from 2.1.0 to 2.3.0 in /ui (#8716)
+ * [c0cd1f855](https://github.com/argoproj/argo-workflows/commit/c0cd1f855a5ef89d0f7a0d49f8e11781735cfa86) feat: ui, Dependabot auto dependency update (#8706)
+ * [b3bf327a0](https://github.com/argoproj/argo-workflows/commit/b3bf327a021e4ab5cc329f83bdec8f533c87a4d6) fix: Fix the recursive example to call the coinflip template (#8696)
+ * [427c16072](https://github.com/argoproj/argo-workflows/commit/427c16072b6c9d677265c95f5fd84e6a37fcc848) feat: Increased default significant figures in formatDuration. Fixes #8650 (#8686)
+ * [7e2df8129](https://github.com/argoproj/argo-workflows/commit/7e2df81299f660089cf676f7622638156affedf5) chore(deps): bump google.golang.org/api from 0.78.0 to 0.79.0 (#8710)
+ * [9ddae875f](https://github.com/argoproj/argo-workflows/commit/9ddae875fdb49d3e852f935e3d8b52fae585bc5e) fix: Fixed podName in killing daemon pods. Fixes #8692 (#8708)
+ * [72d3f32e5](https://github.com/argoproj/argo-workflows/commit/72d3f32e5676207d1511c609b00d26df20a2607e) fix: update go-color path/version (#8707)
+ * [92b3ef27a](https://github.com/argoproj/argo-workflows/commit/92b3ef27af7a7e6b930045e95072a47c8745b1d3) fix: upgrade moment from 2.29.2 to 2.29.3 (#8679)
+ * [8d4ac38a1](https://github.com/argoproj/argo-workflows/commit/8d4ac38a158dc2b4708478f7e7db1f2dd488ffed) feat: ui, add node version constraint (#8678)
+ * [2cabddc9a](https://github.com/argoproj/argo-workflows/commit/2cabddc9a9241061d8b89cf671f1c548405f4cb0) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.24 to 7.0.26 (#8673)
+ * [859ebe99f](https://github.com/argoproj/argo-workflows/commit/859ebe99f760c6fb30870993359274a92cec2fb9) fix: Terminate, rather than delete, deadlined pods. Fixes #8545 (#8620)
+ * [dd565208e](https://github.com/argoproj/argo-workflows/commit/dd565208e236bc56230e75bedcc5082d171e6155) fix(git): add auth to fetch (#8664)
+ * [70f70209d](https://github.com/argoproj/argo-workflows/commit/70f70209d693d3933177a7de2cb6e421b763656f) fix: Handle omitted nodes in DAG enhanced depends logic. Fixes #8654 (#8672)
+ * [3fdf30d9f](https://github.com/argoproj/argo-workflows/commit/3fdf30d9f9181d74d81ca3184b53bbe661ecb845) fix: Enhance artifact visualization. Fixes #8619 (#8655)
+ * [16fef4e54](https://github.com/argoproj/argo-workflows/commit/16fef4e5498fac88dc80d33d653c99fec641150d) fix: enable `ARGO_REMOVE_PVC_PROTECTION_FINALIZER` by default. Fixes #8592 (#8661)
+ * [e4d57c6d5](https://github.com/argoproj/argo-workflows/commit/e4d57c6d560e025a336415aa840d2457eeca79f4) feat: `argo cp` to download artifacts. Fixes #695 (#8582)
+ * [e6e0c9bb3](https://github.com/argoproj/argo-workflows/commit/e6e0c9bb3b923a6d977875cbbd2744b8bacfce15) chore(deps): bump docker/login-action from 1 to 2 (#8642)
+ * [05781101d](https://github.com/argoproj/argo-workflows/commit/05781101dc94701aabd1bdbc2d3be4aa383b49f2) chore(deps): bump docker/setup-buildx-action from 1 to 2 (#8641)
+ * [6a4957135](https://github.com/argoproj/argo-workflows/commit/6a495713593f11514500998f6f69ce8f2e463975) chore(deps): bump docker/setup-qemu-action from 1 to 2 (#8640)
+ * [02370b51d](https://github.com/argoproj/argo-workflows/commit/02370b51d59bdd60b07c6c938737ed997807e4f2) feat: Track UI event #8402 (#8460)
+ * [64a2b28a5](https://github.com/argoproj/argo-workflows/commit/64a2b28a5fb51b50fe0e0a30185a8c3400d10548) fix: close http body. Fixes #8622 (#8624)
+ * [68a2cee6a](https://github.com/argoproj/argo-workflows/commit/68a2cee6a3373214803db009c7a6290954107c37) chore(deps): bump google.golang.org/api from 0.77.0 to 0.78.0 (#8602)
+ * [ed351ff08](https://github.com/argoproj/argo-workflows/commit/ed351ff084c4524ff4b2a45b53e539f91f5d423a) fix: ArtifactGC moved from Template to Artifact. Fixes #8556. (#8581)
+ * [87470e1c2](https://github.com/argoproj/argo-workflows/commit/87470e1c2bf703a9110e97bb755614ce8757fdcc) fix: Added artifact Content-Security-Policy (#8585)
+ * [61b80c90f](https://github.com/argoproj/argo-workflows/commit/61b80c90fd93aebff26df73fcddffa75732d10ec) Fix panic on executor plugin eventhandler (#8588)
+ * [974031570](https://github.com/argoproj/argo-workflows/commit/97403157054cb779b2005991fbb65c583aa3644c) fix: Polish artifact visualisation. Fixes #7743 (#8552)
+ * [98dd898be](https://github.com/argoproj/argo-workflows/commit/98dd898bef67e8523a0bf2ed942241dcb69eabe7) fix: Correct CSP. Fixes #8560 (#8579)
+ * [3d892d9b4](https://github.com/argoproj/argo-workflows/commit/3d892d9b481c5eefeb309b462b3f166a31335bc4) feat: New endpoint capable of serving directory listing or raw file, from non-archived or archived workflow (#8548)
+ * [71e2073b6](https://github.com/argoproj/argo-workflows/commit/71e2073b66b3b30b1eda658e88b7f6fd89469a92) chore(deps): bump lodash-es from 4.17.20 to 4.17.21 in /ui (#8577)
+ * [abf3c7411](https://github.com/argoproj/argo-workflows/commit/abf3c7411921dd422804c72b4f68dc2ab2731047) chore(deps): bump github.com/argoproj/pkg from 0.13.1 to 0.13.2 (#8571)
+ * [ffd5544c3](https://github.com/argoproj/argo-workflows/commit/ffd5544c31da026999b78197f55e6f4d2c8d7628) chore(deps): bump google.golang.org/api from 0.76.0 to 0.77.0 (#8572)
+ * [dc8fef3e5](https://github.com/argoproj/argo-workflows/commit/dc8fef3e5b1c0b833cc8568dbea23dbd1b310bdc) fix: Support memoization on plugin node. Fixes #8553 (#8554)
+ * [5b8638fcb](https://github.com/argoproj/argo-workflows/commit/5b8638fcb0f6ab0816f58f35a71f4f178ba9b7d9) fix: modified `SearchArtifact` to return `ArtifactSearchResults`. Fixes #8543 (#8557)
+ * [9398b0717](https://github.com/argoproj/argo-workflows/commit/9398b0717c14e15c78f6fe314ca9168d0104418d) feat: add more options to ArtifactSearchQuery. Fixes #8542. (#8549)
+ * [c781a5828](https://github.com/argoproj/argo-workflows/commit/c781a582821c4e08416eba9a3889eb2588596aa6) feat: Make artifacts discoverable in the DAG. Fixes #8494 (#8496)
+ * [d25b3fec4](https://github.com/argoproj/argo-workflows/commit/d25b3fec49377ea4be6a63d815a2b609636ef607) feat: Improve artifact server response codes. Fixes #8516 (#8524)
+ * [65b7437f7](https://github.com/argoproj/argo-workflows/commit/65b7437f7b26e19581650c0c2078f9dd8c89a73f) chore(deps): bump github.com/argoproj/pkg from 0.13.0 to 0.13.1 (#8537)
+ * [ecd91b1c4](https://github.com/argoproj/argo-workflows/commit/ecd91b1c4215a2ab8742f7c43eaade98a1d47eba) fix: added json tag to ArtifactGCStrategies (#8523)
+ * [f223bb8a3](https://github.com/argoproj/argo-workflows/commit/f223bb8a3c277e96a19e08f30f27ad70c0c425d3) fix: ArtifactGCOnWorkflowDeletion typo quick fix (#8519)
+ * [b4202b338](https://github.com/argoproj/argo-workflows/commit/b4202b338b5f97552fb730e4d07743c365d6f5ec) feat: Do not return cause of internal server error. Fixes #8514 (#8522)
+ * [d7bcaa756](https://github.com/argoproj/argo-workflows/commit/d7bcaa7569ac15d85eb293a72a1a98779275bd6e) feat: add finalizer for artifact GC (#8513)
+ * [c3ae56565](https://github.com/argoproj/argo-workflows/commit/c3ae56565bbe05c9809c5ad1192fcfc3ae717114) fix: Do not log container not found (#8509)
+ * [9a1345323](https://github.com/argoproj/argo-workflows/commit/9a1345323bb4727ba4fa769363b671213c02ded7) feat: Implement Workflow.SearchArtifacts(). Fixes #8473 (#8517)
+ * [30d9f8d77](https://github.com/argoproj/argo-workflows/commit/30d9f8d77caa69467f2b388b045fe9c3f8d05cb8) feat: Add correct CSP/XFO to served artifacts. Fixing #8492 (#8511)
+ * [d3f8db341](https://github.com/argoproj/argo-workflows/commit/d3f8db3417586b307401ecd5d172f9a1f97241db) feat: Save `containerSet` logs in artifact repository. Fixes #7897 (#8491)
+ * [6769ba720](https://github.com/argoproj/argo-workflows/commit/6769ba7209c1c8ffa6ecd5414d9694e743afe557) feat: add ArtifactGC to template spec (#8493)
+ * [19e763a3b](https://github.com/argoproj/argo-workflows/commit/19e763a3ba7ceaa890dc34310abeb4e7e4555641) chore(deps): bump google.golang.org/api from 0.75.0 to 0.76.0 (#8495)
+ * [6e9d42aed](https://github.com/argoproj/argo-workflows/commit/6e9d42aed1623e215a04c98cf1632f08f79a45cb) feat: add capability to choose params in suspend node. Fixes #8425 (#8472)
+ * [8685433e1](https://github.com/argoproj/argo-workflows/commit/8685433e1c183f1eb56add14c3e19c7b676314bb) feat: Added a delete function to the artifacts storage. Fixes #8470 (#8490)
+ * [9f5759b5b](https://github.com/argoproj/argo-workflows/commit/9f5759b5bd2a01d0f2930faa20ad5a769395eb99) feat: Enable git artifact clone of single branch (#8465)
+ * [7376e7cda](https://github.com/argoproj/argo-workflows/commit/7376e7cda4f72f0736fc128d15495acff71b987d) feat: Artifact streaming: enable artifacts to be streamed to users rather than loading the full file to disk first. Fixes #8396 (#8486)
+ * [06e9445ba](https://github.com/argoproj/argo-workflows/commit/06e9445ba71faba6f1132703762ec592a168ca9b) feat: add empty dir into wait container (#8390)
+ * [c61770622](https://github.com/argoproj/argo-workflows/commit/c6177062276cc39c3b21644ab1d6989cbcaf075c) fix: Pod `OOMKilled` should fail workflow. Fixes #8456 (#8478)
+ * [37a8a81df](https://github.com/argoproj/argo-workflows/commit/37a8a81df1d7ef3067596199f96974d31b200b88) feat: add ArtifactGC to workflow and template spec. Fixes #8471 (#8482)
+ * [ae803bba4](https://github.com/argoproj/argo-workflows/commit/ae803bba4f9b0c85f0d0471c22e44eb1c0f8f5f9) fix: Revert controller readiness changes. Fixes #8441 (#8454)
+ * [147ca4637](https://github.com/argoproj/argo-workflows/commit/147ca46376a4d86a09bde689d848396af6750b1e) fix: PodGC works with WorkflowTemplate. Fixes #8448 (#8452)
+ * [b7aeb6298](https://github.com/argoproj/argo-workflows/commit/b7aeb62982d91036edf5ba942eebeb4b22e30a3d) feat: Add darwin-arm64 binary build. Fixes #8450 (#8451)
+ * [8c0a957c3](https://github.com/argoproj/argo-workflows/commit/8c0a957c3ef0149f3f616a8baef2eb9a164436c1) fix: Fix bug in entrypoint lookup (#8453)
+ * [79508cc78](https://github.com/argoproj/argo-workflows/commit/79508cc78bd5b79762719c3b2fbe970981277e1f) chore(deps): bump google.golang.org/api from 0.74.0 to 0.75.0 (#8447)
+ * [24f9db628](https://github.com/argoproj/argo-workflows/commit/24f9db628090e9dfdfc7d657af80d96c176a47fd) chore(deps): bump github.com/argoproj/pkg from 0.11.0 to 0.12.0 (#8439)
+ * [e28fb0744](https://github.com/argoproj/argo-workflows/commit/e28fb0744209529cf0f7562c71f7f645db21ba1a) chore(deps): bump dependabot/fetch-metadata from 1.3.0 to 1.3.1 (#8438)
+ * [72bb11305](https://github.com/argoproj/argo-workflows/commit/72bb1130543a3cc81347fe4fcf3257d8b35cd478) chore(deps): bump github.com/argoproj-labs/argo-dataflow (#8440)
+ * [230c82652](https://github.com/argoproj/argo-workflows/commit/230c8265246d50a095cc3a697fcd437174731aa8) feat: added support for http as option for artifact upload. Fixes #785 (#8414)
+ * [4f067ab4b](https://github.com/argoproj/argo-workflows/commit/4f067ab4bcb9ae570b9af11b2abd64d592e1fbbc) chore(deps): bump github.com/prometheus/common from 0.33.0 to 0.34.0 (#8427)
+ * [a2fd0031e](https://github.com/argoproj/argo-workflows/commit/a2fd0031ef13b63fd65520c615043e2aff89dde8) chore(deps): bump github.com/tidwall/gjson from 1.14.0 to 1.14.1 (#8426)
+ * [3d1ea426a](https://github.com/argoproj/argo-workflows/commit/3d1ea426a28c65c206752e957bb68a57ee8ed32e) fix: Remove binaries from Windows image. Fixes #8417 (#8420)
+ * [e71fdee07](https://github.com/argoproj/argo-workflows/commit/e71fdee07b8ccd7905752808bffb2283e170077a) Revert "feat: added support for http as an option for artifact upload. Fixes #785 (#8405)"
+ * [5845efbb9](https://github.com/argoproj/argo-workflows/commit/5845efbb94da8acfb218787846ea10c37fb2eebb) feat: Log result of HTTP requests & artifacts load/saves. Closes #8257 (#8394)
+ * [d22be825c](https://github.com/argoproj/argo-workflows/commit/d22be825cfb901f1ce59ba3744488cb8e144233b) feat: added support for http as an option for artifact upload. Fixes #785 (#8405)
+ * [4471b59a5](https://github.com/argoproj/argo-workflows/commit/4471b59a52873ca66d6834a06519407c858f5906) fix: open minio dashboard on different port in quick-start (#8407)
+ * [f467cc555](https://github.com/argoproj/argo-workflows/commit/f467cc5558bd22330eebfbc352ad4a7607f9fa4c) fix: Daemon step updated 'pod delete' while pod is running (#8399)
+ * [a648ccdcf](https://github.com/argoproj/argo-workflows/commit/a648ccdcfa3bb4cd5f5684faf921ab9fdab761de) fix: prevent backoff when retryStrategy.limit has been reached. Fixes #7588 (#8090)
+ * [136ebbc45](https://github.com/argoproj/argo-workflows/commit/136ebbc45b7cba346d7ba72f278624647a6b5a1c) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.23 to 7.0.24 (#8397)
+ * [73ea7c72c](https://github.com/argoproj/argo-workflows/commit/73ea7c72c99a073dbe3ec0a420e112945916fb94) feat!: Add entrypoint lookup. Fixes #8344 (#8345)
+ * [283f6b58f](https://github.com/argoproj/argo-workflows/commit/283f6b58f979db1747ca23753d0562a440f95908) fix: Add readiness check to controller. Fixes #8283 (#8285)
+ * [75b533b61](https://github.com/argoproj/argo-workflows/commit/75b533b61eebd00044f2682540f5de15d6be8fbb) chore(deps): bump github.com/spf13/viper from 1.10.1 to 1.11.0 (#8392)
+ * [b09b9bdfb](https://github.com/argoproj/argo-workflows/commit/b09b9bdfb132c3967b81718bbc3c6e37fb2a3a42) fix: Absolute submodules in git artifacts. Fixes #8377 (#8381)
+ * [d47081fb4](https://github.com/argoproj/argo-workflows/commit/d47081fb4664d3a26e802a5c3c36798108388f2f) fix: upgrade react-moment from 1.0.0 to 1.1.1 (#8389)
+ * [010e359e4](https://github.com/argoproj/argo-workflows/commit/010e359e4c29b1af5653c46112ad53ac9b2679be) fix: upgrade react-datepicker from 2.14.1 to 2.16.0 (#8388)
+ * [0c9d88b44](https://github.com/argoproj/argo-workflows/commit/0c9d88b4429ff59c656e7b78b2160a55b49976ce) fix: upgrade prop-types from 15.7.2 to 15.8.1 (#8387)
+ * [54fa39c89](https://github.com/argoproj/argo-workflows/commit/54fa39c897d9883cec841450808102d71bd46fa8) fix: Back-off UI retries. Fixes #5697 (#8333)
+ * [637d14c88](https://github.com/argoproj/argo-workflows/commit/637d14c88f7d12c1c0355d62c2d1d4b03c4934e1) fix: replace `podName` with `nodeId` in `_.primary.swagger.json` (#8385)
+ * [95323f87d](https://github.com/argoproj/argo-workflows/commit/95323f87d42c9cf878563bfcb11460171906684b) fix: removed error from artifact server 401 response. Fixes #8382 (#8383)
+ * [2d91646aa](https://github.com/argoproj/argo-workflows/commit/2d91646aafede0e5671b07b2ac6eb27a057455b1) fix: upgrade js-yaml from 3.13.1 to 3.14.1 (#8374)
+ * [54eaed060](https://github.com/argoproj/argo-workflows/commit/54eaed0604393106b4dde3e7d7e6ccb41a42de6b) fix: upgrade cron-parser from 2.16.3 to 2.18.0 (#8373)
+ * [e97b0e66b](https://github.com/argoproj/argo-workflows/commit/e97b0e66b89f131fe6a12f24c26efbb73e16ef2e) fix: Updating completed node status
+ * [627597b56](https://github.com/argoproj/argo-workflows/commit/627597b5616f4d22e88b89a6d7017a67b6a4143d) fix: Add auth for SDKs. Fixes #8230 (#8367)
+ * [55ecfeb7b](https://github.com/argoproj/argo-workflows/commit/55ecfeb7b0e300a5d5cc6027c9212365cdaf4a2b) chore(deps): bump github.com/go-openapi/jsonreference (#8363)
+ * [163be6d99](https://github.com/argoproj/argo-workflows/commit/163be6d99cc7ee262580196fbfd2cb9e9d7d8833) chore(deps): bump actions/download-artifact from 2 to 3 (#8360)
+ * [765bafb12](https://github.com/argoproj/argo-workflows/commit/765bafb12de25a7589aa1e2733786e0285290c22) chore(deps): bump actions/upload-artifact from 2 to 3 (#8361)
+ * [eafa10de8](https://github.com/argoproj/argo-workflows/commit/eafa10de80d31bbcf1ec030d20ecfe879ab2d171) chore(deps): bump actions/setup-go from 2 to 3 (#8362)
+ * [e9de085d6](https://github.com/argoproj/argo-workflows/commit/e9de085d65a94d4189a54566d99c7177c1a7d735) fix: Erratum in docs. Fixes #8342 (#8359)
+ * [a3d1d07e1](https://github.com/argoproj/argo-workflows/commit/a3d1d07e1cbd19039771c11aa202bd8fd68198e7) fix: upgrade react-chartjs-2 from 2.10.0 to 2.11.2 (#8357)
+ * [b199cb947](https://github.com/argoproj/argo-workflows/commit/b199cb9474f7b1a3303a12858a2545aa85484d28) fix: upgrade history from 4.7.2 to 4.10.1 (#8356)
+ * [e40521556](https://github.com/argoproj/argo-workflows/commit/e4052155679a43cf083daf0c1b3fd5d45a5fbe24) fix: upgrade multiple dependencies with Snyk (#8355)
+ * [8c893bd13](https://github.com/argoproj/argo-workflows/commit/8c893bd13998b7dee09d0dd0c7a292b22509ca20) fix: upgrade com.google.code.gson:gson from 2.8.9 to 2.9.0 (#8354)
+ * [ee3765643](https://github.com/argoproj/argo-workflows/commit/ee3765643632fa6d8dbfb528a395cbb28608e2e8) feat: add message column to `kubectl get wf` and `argo list`. Fixes #8307 (#8353)
+ * [ae3881525](https://github.com/argoproj/argo-workflows/commit/ae3881525ce19a029a4798ff294e1b0c982e3268) fix: examples/README.md: overriten => overridden (#8351)
+ * [242d53596](https://github.com/argoproj/argo-workflows/commit/242d53596a5cf23b4470c2294204030ce11b01c4) fix: Fix response type for artifact service OpenAPI and SDKs. Fixes #7781 (#8332)
+ * [ab21eed52](https://github.com/argoproj/argo-workflows/commit/ab21eed527d15fa2c10272f740bff7c7963891c7) fix: upgrade io.swagger:swagger-annotations from 1.6.2 to 1.6.5 (#8335)
+ * [f708528fb](https://github.com/argoproj/argo-workflows/commit/f708528fbdfb9adecd8a66df866820eaab9a69ea) fix: upgrade react-monaco-editor from 0.36.0 to 0.47.0 (#8339)
+ * [3c35bd2f5](https://github.com/argoproj/argo-workflows/commit/3c35bd2f55dfdf641882cb5f9085b0b14f6d4d93) fix: upgrade cronstrue from 1.109.0 to 1.125.0 (#8338)
+ * [7ee17ddb7](https://github.com/argoproj/argo-workflows/commit/7ee17ddb7804e3f2beae87a8f532b1c0e6d1e520) fix: upgrade com.squareup.okhttp3:logging-interceptor from 4.9.1 to 4.9.3 (#8336)
+ * [68229e37e](https://github.com/argoproj/argo-workflows/commit/68229e37e295e3861cb7f6621ee3b9c7aabf8d67) added new-line to USERS.md (#8340)
+ * [94472c0ba](https://github.com/argoproj/argo-workflows/commit/94472c0bad4ed92ac06efb8c28563eba7b5bd1ab) chore(deps): bump cloud.google.com/go/storage from 1.20.0 to 1.22.0 (#8341)
+ * [aa9ff17d5](https://github.com/argoproj/argo-workflows/commit/aa9ff17d5feaa79aa26d9dc9cf9f67533f886b1c) fix: Remove path traversal CWE-23 (#8331)
+ * [14a9a1dc5](https://github.com/argoproj/argo-workflows/commit/14a9a1dc57f0d83231a19e76095ebdd4711f2594) fix: ui/package.json & ui/yarn.lock to reduce vulnerabilities (#8328)
+ * [58052c2b7](https://github.com/argoproj/argo-workflows/commit/58052c2b7b72daa928f8d427055be01cf896ff3e) fix: sdks/java/pom.xml to reduce vulnerabilities (#8327)
+ * [153540fdd](https://github.com/argoproj/argo-workflows/commit/153540fdd0e3b6f00050550abed67cae16299cbe) feat: Remove binaries from argoexec image. Fixes #7486 (#8292)
+ * [af8077423](https://github.com/argoproj/argo-workflows/commit/af807742343cb1a76926f6a1251466b9af988a47) feat: Always Show Workflow Parameters (#7809)
+ * [62e0a8ce4](https://github.com/argoproj/argo-workflows/commit/62e0a8ce4e74d2e19f3a9c0fb5e52bd58a6b944b) feat: Remove the PNS executor. Fixes #7804 (#8296)
+ * [0cdd2b40a](https://github.com/argoproj/argo-workflows/commit/0cdd2b40a8ee2d31476f8078eaedaa16c6827a76) fix: update docker version to address CVE-2022-24921 (#8312)
+ * [9c901456a](https://github.com/argoproj/argo-workflows/commit/9c901456a44501f11afc2bb1e856f0d0828fd13f) fix: Default value is ignored when loading params from configmap. Fixes #8262 (#8271)
+ * [9ab0e959a](https://github.com/argoproj/argo-workflows/commit/9ab0e959ac497433bcee2bb9c8d5710f87f1e3ea) fix: reduce number of workflows displayed in UI by default. Fixes #8297 (#8303)
+ * [13bc01362](https://github.com/argoproj/argo-workflows/commit/13bc013622c3b681bbd3c334dce0eea6870fcfde) fix: git artifact will be checked out even if local file matches name of tracking branch (#8287)
+ * [65dc0882c](https://github.com/argoproj/argo-workflows/commit/65dc0882c9bb4496f1c4b2e0deb730e775724c82) feat: Fail on invalid config. (#8295)
+ * [5ac0e314d](https://github.com/argoproj/argo-workflows/commit/5ac0e314da80667e8b3b355c55cf9e1ab9b57b34) fix: `taskresults` owned by pod rather than workflow. (#8284)
+ * [996655f4f](https://github.com/argoproj/argo-workflows/commit/996655f4f3f03a30bcb82a1bb03f222fd100b8e0) fix: Snyk security recommendations (Golang). Fixes #8288
+ * [221d99827](https://github.com/argoproj/argo-workflows/commit/221d9982713ca30c060955bb35b48af3143c3754) fix: Snyk security recommendations (Node). Fixes #8288
+ * [b55dead05](https://github.com/argoproj/argo-workflows/commit/b55dead055139d1de33c464beed2b5ef596f5c8e) Revert "build: Enable governance bot. Fixes #8256 (#8259)" (#8294)
+ * [e50ec699c](https://github.com/argoproj/argo-workflows/commit/e50ec699cb33a7b84b0cb3c5b99396fe5365facd) chore(deps): bump google.golang.org/api from 0.73.0 to 0.74.0 (#8281)
+ * [954a3ee7e](https://github.com/argoproj/argo-workflows/commit/954a3ee7e7cc4f02074c07f7add971ca2be3291e) fix: install.yaml missing crb subject ns (#8280)
+ * [a3c326fdf](https://github.com/argoproj/argo-workflows/commit/a3c326fdf0d2133d5e78ef71854499f576e7e530) Remove hardcoded namespace in kustomize file #8250 (#8266)
+ * [b198b334d](https://github.com/argoproj/argo-workflows/commit/b198b334dfdb8e77d2ee51cd05b0716a29ab9169) fix: improve error message when the controller is set `templateReferencing: Secure` (#8277)
+ * [5598b8c7f](https://github.com/argoproj/argo-workflows/commit/5598b8c7fb5d17015e5c941e09953a74d8931436) feat: add resubmit and retry buttons for archived workflows. Fixes #7908 and #7911 (#8272)
+ * [6975607fa](https://github.com/argoproj/argo-workflows/commit/6975607fa33bf39e752b9cefcb8cb707a46bc6d4) chore(deps): bump github.com/prometheus/common from 0.32.1 to 0.33.0 (#8274)
+ * [78f01f2b9](https://github.com/argoproj/argo-workflows/commit/78f01f2b9f24a89db15a119885dfe8eb6420c70d) fix: patch workflow status to workflow (#8265)
+ * [f48998c07](https://github.com/argoproj/argo-workflows/commit/f48998c070c248688d996e5c8a4fec7601f5ab53) feat: Add a link in the UI for WorkflowTemplate. Fixes #4760 (#8208)
+ * [f02d4b72a](https://github.com/argoproj/argo-workflows/commit/f02d4b72adea9fbd23880c70871f92d66dc183c7) chore(deps): bump github.com/argoproj-labs/argo-dataflow (#8264)
+ * [48202fe99](https://github.com/argoproj/argo-workflows/commit/48202fe9976ff39731cf73c03578081a10146596) chore(deps): bump dependabot/fetch-metadata from 1.1.1 to 1.3.0 (#8263)
+ * [f00ec49d6](https://github.com/argoproj/argo-workflows/commit/f00ec49d695bdad108000abcdfd0f82f6af9ca6c) feat!: Refactor/simplify configuration code (#8235)
+ * [c1f72b662](https://github.com/argoproj/argo-workflows/commit/c1f72b66282012e712e28a715c08dddb1a556c16) feat: add archive retry command to argo CLI. Fixes #7907 (#8229)
+ * [7a07805b1](https://github.com/argoproj/argo-workflows/commit/7a07805b183d598847bb9323f1009d7e8bbc1ac6) fix: Update argo-server manifests to have read-only root file-system (#8210)
+ * [0d4b4dc34](https://github.com/argoproj/argo-workflows/commit/0d4b4dc34127a27f7ca6e5c41197f3aaacc79cb8) fix: Panic in Workflow Retry (#8243)
+ * [61f0decd8](https://github.com/argoproj/argo-workflows/commit/61f0decd873a6a422c3a7159d6023170637338ff) fix: Hook with wftemplateRef (#8242)
+ * [e232340cc](https://github.com/argoproj/argo-workflows/commit/e232340cc5191c5904afe87f03c80545bb10e430) fix: grep pattern (#8238)
+ * [1d373c41a](https://github.com/argoproj/argo-workflows/commit/1d373c41afbebcf8de55114582693bcbdc59b342) fix: submodule cloning via git. Fixes #7469 (#8225)
+ * [6ee1b03f9](https://github.com/argoproj/argo-workflows/commit/6ee1b03f9e83c1e129b45a6bc9292a99add6b36e) fix: do not panic when termination-log is not writeable (#8221)
+ * [cae38894f](https://github.com/argoproj/argo-workflows/commit/cae38894f96b0d33cde54ef9cdee3cda53692a8d) chore(deps): bump github.com/aliyun/aliyun-oss-go-sdk (#8232)
+ * [e0e45503e](https://github.com/argoproj/argo-workflows/commit/e0e45503e6704b27e3e9ef0ff4a98169f3b072fa) chore(deps): bump peter-evans/create-pull-request from 3 to 4 (#8216)
+ * [8c77e89fc](https://github.com/argoproj/argo-workflows/commit/8c77e89fc185ff640e1073692dfc7c043037440a) feat: add archive resubmit command to argo CLI. Fixes #7910 (#8166)
+ * [d8aa46731](https://github.com/argoproj/argo-workflows/commit/d8aa46731c74730ccca1a40187109a63a675618b) fix: Support `--parameters-file` where ARGO_SERVER specified. Fixes #8160 (#8213)
+ * [d33d391a4](https://github.com/argoproj/argo-workflows/commit/d33d391a4c06c136b6a0964a51c75850323684e6) feat: Add support to auto-mount service account tokens for plugins. (#8176)
+ * [8a1fbb86e](https://github.com/argoproj/argo-workflows/commit/8a1fbb86e7c83bf14990805166d04d5cb4479ea3) fix: removed deprecated k8sapi executor. Fixes #7802 (#8205)
+ * [4d5079822](https://github.com/argoproj/argo-workflows/commit/4d5079822da17fd644a99a9e4b27259864ae8c36) chore(deps): bump actions/cache from 2 to 3 (#8206)
+ * [12cd8bcaa](https://github.com/argoproj/argo-workflows/commit/12cd8bcaa75381b5a9fa65aff03ac13aec706375) fix: requeue not delete the considered Task flag (#8194)
+ * [e2b288318](https://github.com/argoproj/argo-workflows/commit/e2b288318b15fa3e3cdc38c3dc7e66774920be8d) fix: Use `latest` image tag when version is `untagged`. Fixes #8188 (#8191)
+ * [6d6d23d81](https://github.com/argoproj/argo-workflows/commit/6d6d23d8110165331d924e97b01d5e26214c72db) fix: task worker requeue wrong task. Fixes #8139 (#8186)
+ * [41fd07aa4](https://github.com/argoproj/argo-workflows/commit/41fd07aa4f8462d70ad3c2c0481d5e09ae97b612) fix: Update `workflowtaskresult` code have own reconciliation loop. (#8135)
+ * [051c7b8d2](https://github.com/argoproj/argo-workflows/commit/051c7b8d2baf50b55e8076a1e09e7340551c04c1) fix: pkg/errors is no longer maintained (#7440)
+ * [fbb43b242](https://github.com/argoproj/argo-workflows/commit/fbb43b2429e45346221a119583aac11df4b5f880) fix: 'workflow.duration' is not available as a real time metric (#8181)
+ * [0e707cdf6](https://github.com/argoproj/argo-workflows/commit/0e707cdf69f891c7c7483e2244f5ea930d31b1c5) fix: Authentication for plugins. Fixes #8144 (#8147)
+ * [d4b1afe6f](https://github.com/argoproj/argo-workflows/commit/d4b1afe6f68afc3061a924186fa09556290ec3e1) feat: add retry API for archived workflows. Fixes #7906 (#7988)
+ * [e7008eada](https://github.com/argoproj/argo-workflows/commit/e7008eada7a885d80952b5184562a29508323c2a) fix: Correctly order emissary combined output. Fixes #8159 (#8175)
+ * [9101c4939](https://github.com/argoproj/argo-workflows/commit/9101c49396fe95d62ef3040cd4d330fde9f35554) fix: Add instance ID to `workflowtaskresults` (#8150)
+ * [2b5e4a1d2](https://github.com/argoproj/argo-workflows/commit/2b5e4a1d2df7877d9b7b7fbedd7136a125a39c8d) feat: Use pinned executor version. (#8165)
+ * [715f6ced6](https://github.com/argoproj/argo-workflows/commit/715f6ced6f42c0b7b5994bf8d16c561f48025fe8) fix: add /etc/mime.types mapping table (#8171)
+ * [6d6e08aa8](https://github.com/argoproj/argo-workflows/commit/6d6e08aa826c406a912387ac438ec20428c7623d) fix: Limit workflows to 128KB and return a friendly error message (#8169)
+ * [057c3346f](https://github.com/argoproj/argo-workflows/commit/057c3346f9f792cf10888320c4297b09f3c11e2e) feat: add TLS config option to HTTP template. Fixes #7390 (#7929)
+ * [013fa2578](https://github.com/argoproj/argo-workflows/commit/013fa2578bc5cace4de754daef04448b30faae32) chore(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 (#8163)
+ * [ad341c4af](https://github.com/argoproj/argo-workflows/commit/ad341c4af1645c191a5736d91d78a19acc7b2fa7) chore(deps): bump google.golang.org/api from 0.72.0 to 0.73.0 (#8162)
+ * [5efc9fc99](https://github.com/argoproj/argo-workflows/commit/5efc9fc995ac898672a575b514f8bfc83b220c4c) feat: add mysql options (#8157)
+ * [cda5737c3](https://github.com/argoproj/argo-workflows/commit/cda5737c37e3ab7c381869d7d820de71285f55a5) chore(deps): bump google.golang.org/api from 0.71.0 to 0.72.0 (#8156)
+ * [be2dd19a0](https://github.com/argoproj/argo-workflows/commit/be2dd19a0718577348823f1f68b82dbef8d95959) Update USERS.md (#8132)
+ * [af26ff7ed](https://github.com/argoproj/argo-workflows/commit/af26ff7ed54d4fe508edac34f82fe155f2d54a9d) fix: Remove need for `get pods` from Emissary (#8133)
+ * [537dd3be6](https://github.com/argoproj/argo-workflows/commit/537dd3be6bf93be37e06d768d9a610038eafb361) feat: Change pod clean-up to use informer. (#8136)
+ * [1d71fb3c4](https://github.com/argoproj/argo-workflows/commit/1d71fb3c4ebdb2891435ed12257743331ff34436) chore(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 (#8131)
+ * [972a4e989](https://github.com/argoproj/argo-workflows/commit/972a4e98987296a844a28dce31162d59732e6532) fix(plugins): UX improvements (#8122)
+ * [437b37647](https://github.com/argoproj/argo-workflows/commit/437b3764783b48a304034cc4291472c6e490689b) feat: add resubmit API for archived workflows. Fixes #7909 (#8079)
+ * [707cf8321](https://github.com/argoproj/argo-workflows/commit/707cf8321ccaf98b4596695fdbfdb04faf9a9487) update kustomize/kubectl installation (#8095)
+ * [48348247f](https://github.com/argoproj/argo-workflows/commit/48348247f0a0fd949871a9f982d7ee70c39509a1) chore(deps): bump google.golang.org/api from 0.70.0 to 0.71.0 (#8108)
+ * [765333dc9](https://github.com/argoproj/argo-workflows/commit/765333dc95575608fdf87328c7548c5e349b557d) fix(executor): Retry kubectl on internal transient error (#8092)
+ * [4d4890454](https://github.com/argoproj/argo-workflows/commit/4d4890454e454acbc86cef039bb6905c63f79e73) fix: Fix the TestStopBehavior flakiness (#8096)
+ * [6855f4c51](https://github.com/argoproj/argo-workflows/commit/6855f4c51b5bd667599f072ae5ddde48967006f1) fix: pod deleted due to delayed cleanup. Fixes #8022 (#8061)
+
+### Contributors
+
+ * Aatman
+ * Adam Eri
+ * Alex Collins
+ * BOOK
+ * Basanth Jenu H B
+ * Brian Loss
+ * Cash Williams
+ * Clemens Lange
+ * Dakota Lillie
+ * Dana Pieluszczak
+ * Dillen Padhiar
+ * Doğukan
+ * Ezequiel Muns
+ * Felix Seidel
+ * Fernando Luís da Silva
+ * Gaurav Gupta
+ * Grzegorz Bielski
+ * Hao Xin
+ * Iain Lane
+ * Isitha Subasinghe
+ * Iván Sánchez
+ * JasonZhu
+ * Jessie Teng
+ * Juan Luis Cano Rodríguez
+ * Julie Vogelman
+ * Kesavan
+ * LoricAndre
+ * Manik Sidana
+ * Marc Abramowitz
+ * Mark Shields
+ * Markus Lippert
+ * Michael Goodness
+ * Michael Weibel
+ * Mike Tougeron
+ * Ming Yu Shi
+ * Miroslav Boussarov
+ * Noam Gal
+ * Philippe Richard
+ * Rohan Kumar
+ * Sanjay Tiwari
+ * Saravanan Balasubramanian
+ * Shubham Nazare
+ * Snyk bot
+ * Soumya Ghosh Dastidar
+ * Stephanie Palis
+ * Swarnim Pratap Singh
+ * Takumi Sue
+ * Tianchu Zhao
+ * Timo Pagel
+ * Tristan Colgate-McFarlane
+ * Tuan
+ * Vignesh
+ * William Van Hevelingen
+ * Wu Jayway
+ * Yuan Tang
+ * alexdittmann
+ * dependabot[bot]
+ * hadesy
+ * ibuder
+ * kennytrytek
+ * lijie
+ * mihirpandya-greenops
+ * momom-i
+ * shirou
+ * smile-luobin
+ * tatsuya-ogawa
+ * tculp
+ * ybyang
+ * İnanç Dokurel
+
+## v3.3.9 (2022-08-09)
+
+ * [5db53aa0c](https://github.com/argoproj/argo-workflows/commit/5db53aa0ca54e51ca69053e1d3272e37064559d7) Revert "fix: Correct kill command. Fixes #8687 (#8908)"
+ * [b7b37d5aa](https://github.com/argoproj/argo-workflows/commit/b7b37d5aa2229c09365735fab165b4876c30aa4a) fix: Skip TestRunAsNonRootWithOutputParams
+ * [e4dca01f1](https://github.com/argoproj/argo-workflows/commit/e4dca01f1a76cefb7cae944ba0c4e54bc0aec427) fix: SignalsSuite test
+ * [151432f9b](https://github.com/argoproj/argo-workflows/commit/151432f9b754981959e149202d5f4b0617064595) fix: add containerRuntimeExecutor: emissary in ci
+ * [a3d6a58a7](https://github.com/argoproj/argo-workflows/commit/a3d6a58a71e1603077a4b39c4368d11847d500fb) feat: refactoring e2e test timeouts to support multiple environments. (#8925)
+ * [f9e2dd21c](https://github.com/argoproj/argo-workflows/commit/f9e2dd21cb09ac90b639be0f97f07da373240202) fix: lint
+ * [ef3fb421f](https://github.com/argoproj/argo-workflows/commit/ef3fb421f02f96195046ba327beca7b08753530b) fix: Correct kill command. Fixes #8687 (#8908)
+ * [e85c815a1](https://github.com/argoproj/argo-workflows/commit/e85c815a10fb59cb95cfdf6d2a171cea7c6aec47) fix: set NODE_OPTIONS to no-experimental-fetch to prevent yarn start error (#8802)
+ * [a19c94bb6](https://github.com/argoproj/argo-workflows/commit/a19c94bb6639540f309883ff0f41b14dd557324b) fix: Omitted task result should also be valid (#8776)
+ * [15f9d5227](https://github.com/argoproj/argo-workflows/commit/15f9d52270af4bca44553755d095d2dd8badfa14) fix: Fixed podName in killing daemon pods. Fixes #8692 (#8708)
+ * [6ec0ca088](https://github.com/argoproj/argo-workflows/commit/6ec0ca0883cf4e2222176ab413b3318017a30796) fix: open minio dashboard on different port in quick-start (#8407)
+ * [d874c1a87](https://github.com/argoproj/argo-workflows/commit/d874c1a87b65b300b2a4c93032bd2970d6f91d8f) fix: ui/package.json & ui/yarn.lock to reduce vulnerabilities (#8328)
+ * [481137c25](https://github.com/argoproj/argo-workflows/commit/481137c259b05c6a5b3c0e3adab1649c2b512364) fix: sdks/java/pom.xml to reduce vulnerabilities (#8327)
+ * [f54fb5c24](https://github.com/argoproj/argo-workflows/commit/f54fb5c24dd52a64da6d5aad5972a6554e386769) fix: grep pattern (#8238)
+ * [73334cae9](https://github.com/argoproj/argo-workflows/commit/73334cae9fbaef96b63889e16a3a2f78c725995e) fix: removed deprecated k8sapi executor. Fixes #7802 (#8205)
+ * [9c9efa67f](https://github.com/argoproj/argo-workflows/commit/9c9efa67f38620eeb08d1a9d2bb612bf14bf33de) fix: retryStrategy.Limit is now read properly for backoff strategy. Fixes #9170. (#9213)
+ * [69b5f1d79](https://github.com/argoproj/argo-workflows/commit/69b5f1d7945247a9e219b53f12fb8b3eec6e5e52) fix: Add missing Go module entries
+
+### Contributors
+
+ * Alex Collins
+ * Dillen Padhiar
+ * Grzegorz Bielski
+ * Julie Vogelman
+ * Kesavan
+ * Rohan Kumar
+ * Saravanan Balasubramanian
+ * Snyk bot
+ * Takumi Sue
+ * Yuan Tang
+
+## v3.3.8 (2022-06-23)
+
+ * [621b0d1a8](https://github.com/argoproj/argo-workflows/commit/621b0d1a8e09634666ebe403ee7b8fc29db1dc4e) fix: check for nil, and add logging to expose root cause of panic in Issue 8968 (#9010)
+ * [b7c218c0f](https://github.com/argoproj/argo-workflows/commit/b7c218c0f7b3ea0035dc44ccc9e8416f30429d16) feat: log workflow size before hydrating/dehydrating. Fixes #8976 (#8988)
+
+### Contributors
+
+ * Dillen Padhiar
+ * Julie Vogelman
+
+## v3.3.7 (2022-06-20)
+
+ * [479763c04](https://github.com/argoproj/argo-workflows/commit/479763c04036db98cd1e9a7a4fc0cc932affb8bf) fix: Skip TestExitHookWithExpression() completely (#8761)
+ * [a1ba42140](https://github.com/argoproj/argo-workflows/commit/a1ba42140154e757b024fe29c61fc7043c741cee) fix: Template in Lifecycle hook should be optional (#8735)
+ * [f10d6238d](https://github.com/argoproj/argo-workflows/commit/f10d6238d83b410a461d1860d0bb3c7ae4d74383) fix: Simplify return logic in executeTmplLifeCycleHook (#8736)
+ * [f2ace043b](https://github.com/argoproj/argo-workflows/commit/f2ace043bb7d050e8d539a781486c9f932bca931) fix: Exit lifecycle hook should respect expression. Fixes #8742 (#8744)
+ * [8c0b43569](https://github.com/argoproj/argo-workflows/commit/8c0b43569bb3e9c9ace21afcdd89d2cec862939c) fix: long code blocks overflow in ui. Fixes #8916 (#8947)
+ * [1d26628b8](https://github.com/argoproj/argo-workflows/commit/1d26628b8bc5f5a4d90d7a31b6f8185f280a4538) fix: sync cluster Workflow Template Informer before it's used (#8961)
+ * [4d9f8f7c8](https://github.com/argoproj/argo-workflows/commit/4d9f8f7c832ff888c11a41dad7a755ef594552c7) fix: Workflow Duration metric shouldn't increase after workflow complete (#8989)
+ * [72e0c6f00](https://github.com/argoproj/argo-workflows/commit/72e0c6f006120f901f02ea3a6bf8b3e7f639eb48) fix: add nil check for retryStrategy.Limit in deadline check. Fixes #8990 (#8991)
+
+### Contributors
+
+ * Dakota Lillie
+ * Dillen Padhiar
+ * Julie Vogelman
+ * Saravanan Balasubramanian
+ * Yuan Tang
+
+## v3.3.6 (2022-05-25)
+
+ * [2b428be80](https://github.com/argoproj/argo-workflows/commit/2b428be8001a9d5d232dbd52d7e902812107eb28) fix: Handle omitted nodes in DAG enhanced depends logic. Fixes #8654 (#8672)
+ * [7889af614](https://github.com/argoproj/argo-workflows/commit/7889af614c354f4716752942891cbca0a0889df0) fix: close http body. Fixes #8622 (#8624)
+ * [622c3d594](https://github.com/argoproj/argo-workflows/commit/622c3d59467a2d0449717ab866bd29bbd0469795) fix: Do not log container not found (#8509)
+ * [7091d8003](https://github.com/argoproj/argo-workflows/commit/7091d800360ad940ec605378324909823911d853) fix: pkg/errors is no longer maintained (#7440)
+ * [3f4c79fa5](https://github.com/argoproj/argo-workflows/commit/3f4c79fa5f54edcb50b6003178af85c70b5a8a1f) feat: remove size limit of 128kb for workflow templates. Fixes #8789 (#8796)
+
+### Contributors
+
+ * Alex Collins
+ * Dillen Padhiar
+ * Stephanie Palis
+ * Yuan Tang
+ * lijie
+
+## v3.3.5 (2022-05-03)
+
+ * [93cb050e3](https://github.com/argoproj/argo-workflows/commit/93cb050e3933638f0dbe2cdd69630e133b3ad52a) Revert "fix: Pod `OOMKilled` should fail workflow. Fixes #8456 (#8478)"
+ * [29f3ad844](https://github.com/argoproj/argo-workflows/commit/29f3ad8446ac5f07abda0f6844f3a31a7d50eb23) fix: Added artifact Content-Security-Policy (#8585)
+ * [a40d27cd7](https://github.com/argoproj/argo-workflows/commit/a40d27cd7535f6d36d5fb8d10cea0226b784fa65) fix: Support memoization on plugin node. Fixes #8553 (#8554)
+ * [f2b075c29](https://github.com/argoproj/argo-workflows/commit/f2b075c29ee97c95cfebb453b18c0ce5f16a5f04) fix: Pod `OOMKilled` should fail workflow. Fixes #8456 (#8478)
+ * [ba8c60022](https://github.com/argoproj/argo-workflows/commit/ba8c600224b7147d1832de1bea694fd376570ae9) fix: prevent backoff when retryStrategy.limit has been reached. Fixes #7588 (#8090)
+ * [c17f8c71d](https://github.com/argoproj/argo-workflows/commit/c17f8c71d40d4e34ef0a87dbc95eda005a57dc39) fix: update docker version to address CVE-2022-24921 (#8312)
+ * [9d0b7aa56](https://github.com/argoproj/argo-workflows/commit/9d0b7aa56cf065bf70c2cfb43f71ea9f92b5f964) fix: Default value is ignored when loading params from configmap. Fixes #8262 (#8271)
+ * [beab5b6ef](https://github.com/argoproj/argo-workflows/commit/beab5b6ef40a187e90ff23294bb1d9e2db9cb90a) fix: install.yaml missing crb subject ns (#8280)
+ * [b0d8be2ef](https://github.com/argoproj/argo-workflows/commit/b0d8be2ef3d3c1c96b15aeda572fcd1596fca9f1) fix: requeue not delete the considererd Task flag (#8194)
+
+### Contributors
+
+ * Alex Collins
+ * Cash Williams
+ * Rohan Kumar
+ * Soumya Ghosh Dastidar
+ * Wu Jayway
+ * Yuan Tang
+ * ybyang
+
+## v3.3.4 (2022-04-29)
+
+ * [02fb874f5](https://github.com/argoproj/argo-workflows/commit/02fb874f5deb3fc3e16f033c6f60b10e03504d00) feat: add capability to choose params in suspend node.Fixes #8425 (#8472)
+ * [32b1b3a3d](https://github.com/argoproj/argo-workflows/commit/32b1b3a3d505dea1d42fdeb0104444ca4f5e5795) feat: Add support to auto-mount service account tokens for plugins. (#8176)
+
+### Contributors
+
+ * Alex Collins
+ * Basanth Jenu H B
+
+## v3.3.3 (2022-04-25)
+
+ * [9c08aedc8](https://github.com/argoproj/argo-workflows/commit/9c08aedc880026161d394207acbac0f64db29a53) fix: Revert controller readiness changes. Fixes #8441 (#8454)
+ * [9854dd3fc](https://github.com/argoproj/argo-workflows/commit/9854dd3fccccd34bf3e4f110412dbd063f3316c2) fix: PodGC works with WorkflowTemplate. Fixes #8448 (#8452)
+
+### Contributors
+
+ * Alex Collins
+
+## v3.3.2 (2022-04-20)
+
+ * [35492a170](https://github.com/argoproj/argo-workflows/commit/35492a1700a0f279694cac874b6d9c07a08265d1) fix: Remove binaries from Windows image. Fixes #8417 (#8420)
+ * [bfc3b6cad](https://github.com/argoproj/argo-workflows/commit/bfc3b6cad02c0a38141201d7f77e14e3f0e637a4) fix: Skip TestRunAsNonRootWithOutputParams
+ * [1c34f9801](https://github.com/argoproj/argo-workflows/commit/1c34f9801b502d1566064726145ce5d68124b213) fix: go.sum
+ * [be35b54b0](https://github.com/argoproj/argo-workflows/commit/be35b54b00e44339f8dcb63d0411bc80f8983764) fix: create cache lint
+ * [017a31518](https://github.com/argoproj/argo-workflows/commit/017a3151837ac05cca1b2425a8395d547d86ed09) fix: create cache lint
+ * [20d601b3d](https://github.com/argoproj/argo-workflows/commit/20d601b3dd2ebef102a1a610e4dbef6924f842ff) fix: create cache lint
+ * [d8f28586f](https://github.com/argoproj/argo-workflows/commit/d8f28586f82b1bdb9e43446bd1792b3b01b2928a) fix: empty push
+ * [f41d94e91](https://github.com/argoproj/argo-workflows/commit/f41d94e91648961dfdc6e8536768012569dcd28f) fix: codegen
+ * [ce195dd52](https://github.com/argoproj/argo-workflows/commit/ce195dd521e195df4edd96bcd27fd950f23ff611) fix: Add auth for SDKs. Fixes #8230 (#8367)
+ * [00c960619](https://github.com/argoproj/argo-workflows/commit/00c9606197c30c138714b27ca5624dd0272c662d) fix: unittest
+ * [a0148c1b3](https://github.com/argoproj/argo-workflows/commit/a0148c1b32fef820a0cde5a5fed1975abedb7f82) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.23 to 7.0.24 (#8397)
+ * [5207d287b](https://github.com/argoproj/argo-workflows/commit/5207d287b5657d9049edd1b67c2b681a13c40420) fix: codegen
+ * [e68e06c34](https://github.com/argoproj/argo-workflows/commit/e68e06c3453453d70a76c08b1a6cb00635b2d941) fix: Daemon step updated 'pod delete' while pod is running (#8399)
+ * [b9f8b3587](https://github.com/argoproj/argo-workflows/commit/b9f8b3587345eda47edfaebb7bc18ea1193d430b) fix: Add readiness check to controller. Fixes #8283 (#8285)
+ * [ed26dc0a0](https://github.com/argoproj/argo-workflows/commit/ed26dc0a09bc38ac2366124621ea98918b95b34a) fix: Absolute submodules in git artifacts. Fixes #8377 (#8381)
+ * [6f77c0af0](https://github.com/argoproj/argo-workflows/commit/6f77c0af03545611dfef0222bcf5f5f76f30f4d4) fix: Back-off UI retries. Fixes #5697 (#8333)
+ * [8d5c2f2a3](https://github.com/argoproj/argo-workflows/commit/8d5c2f2a39033972e1f389029f5c08290aa19ccd) fix: replace `podName` with `nodeId` in `_.primary.swagger.json` (#8385)
+ * [a327edd5a](https://github.com/argoproj/argo-workflows/commit/a327edd5a5c5e7aff4c64131f1a9c3d9e5d9d3eb) fix: removed error from artifact server 401 response. Fixes #8382 (#8383)
+ * [502cf6d88](https://github.com/argoproj/argo-workflows/commit/502cf6d882ac51fd80950c2f25f90e491b3f13f6) fix: Updating complated node status
+ * [0a0956864](https://github.com/argoproj/argo-workflows/commit/0a09568648199fcc5a8997e4f5eed55c40bfa974) fix: Fix response type for artifact service OpenAPI and SDKs. Fixes #7781 (#8332)
+ * [a3bce2aaf](https://github.com/argoproj/argo-workflows/commit/a3bce2aaf94b07a73c3a7a4c9205872be7dc360c) fix: patch workflow status to workflow (#8265)
+ * [c5174fbee](https://github.com/argoproj/argo-workflows/commit/c5174fbeec69aa0ea4dbad8b239b7e46c76e5873) fix: Update argo-server manifests to have read-only root file-system (#8210)
+ * [ba795e656](https://github.com/argoproj/argo-workflows/commit/ba795e6562902d66adadd15554f791bc85b779a8) fix: Panic in Workflow Retry (#8243)
+ * [c95de6bb2](https://github.com/argoproj/argo-workflows/commit/c95de6bb25b8d7294f8f48490fccb2ba95d96f9b) fix: Hook with wftemplateRef (#8242)
+ * [187c21fa7](https://github.com/argoproj/argo-workflows/commit/187c21fa7b45d87c55dd71f247e439f6c9b776b3) fix: submodule cloning via git. Fixes #7469 (#8225)
+ * [289d44b9b](https://github.com/argoproj/argo-workflows/commit/289d44b9b0234baf24f1384a0b6743ca10bfb060) fix: do not panic when termination-log is not writeable (#8221)
+ * [c10ba38a8](https://github.com/argoproj/argo-workflows/commit/c10ba38a86eb2ba4e70812b172a02bea901073f1) fix: Support `--parameters-file` where ARGO_SERVER specified. Fixes #8160 (#8213)
+ * [239781109](https://github.com/argoproj/argo-workflows/commit/239781109e62e405a6596e88c706df21cf152a6e) fix: Use `latest` image tag when version is `untagged`. Fixes #8188 (#8191)
+ * [7d00fa9d9](https://github.com/argoproj/argo-workflows/commit/7d00fa9d94427e5b30bea3c3bd7fecd673b95870) fix: task worker requeue wrong task. Fixes #8139 (#8186)
+ * [ed6907f1c](https://github.com/argoproj/argo-workflows/commit/ed6907f1cafb1cd53a877c1bdebbf0497ab53278) fix: Authentication for plugins. Fixes #8144 (#8147)
+ * [5ff9bc9aa](https://github.com/argoproj/argo-workflows/commit/5ff9bc9aaba80db7833d513321bb6ae2d305f1f9) fix: Correctly order emissary combined output. Fixes #8159 (#8175)
+ * [918c27311](https://github.com/argoproj/argo-workflows/commit/918c273113ed14349c8df87d727a5b8070d301a1) fix: Add instance ID to `workflowtaskresults` (#8150)
+ * [af0cfab8f](https://github.com/argoproj/argo-workflows/commit/af0cfab8f3bd5b62ebe967381fed0bccbd7c7ada) fix: Update `workflowtaskresult` code have own reconciliation loop. (#8135)
+ * [3a425ec5a](https://github.com/argoproj/argo-workflows/commit/3a425ec5a1010e9b9ac2aac054095e5e9d240693) fix: Authentication for plugins. Fixes #8144 (#8147)
+ * [cdd1633e4](https://github.com/argoproj/argo-workflows/commit/cdd1633e428d8596467e7673d0d6d5c50ade41af) fix: Correctly order emissary combined output. Fixes #8159 (#8175)
+ * [22c203fc4](https://github.com/argoproj/argo-workflows/commit/22c203fc44a005e4207fff5b8ce7f4854ed0bf78) fix: Add instance ID to `workflowtaskresults` (#8150)
+ * [79a9a5b6f](https://github.com/argoproj/argo-workflows/commit/79a9a5b6fcca7953e740a5e171d3bc7f08953854) fix: improve error message when the controller is set `templateReferencing: Secure` (#8277)
+ * [7e880216a](https://github.com/argoproj/argo-workflows/commit/7e880216a1bf384d15d836877d170bbeea19814d) fix: `taskresults` owned by pod rather than workflow. (#8284)
+ * [347583132](https://github.com/argoproj/argo-workflows/commit/347583132916fd2f87b3885381fe86281ea3ec33) fix: fix: git artifact will be checked out even if local file matches name of tracking branch (#8287)
+ * [aa460b9ad](https://github.com/argoproj/argo-workflows/commit/aa460b9adc40ed4854dc373d0d755e6d36b633f8) fix: reduce number of workflows displayed in UI by default. Fixes #8297 (#8303)
+
+### Contributors
+
+ * Aatman
+ * Alex Collins
+ * Dillen Padhiar
+ * Markus Lippert
+ * Michael Weibel
+ * Rohan Kumar
+ * Saravanan Balasubramanian
+ * Takumi Sue
+ * Tristan Colgate-McFarlane
+ * Wu Jayway
+ * dependabot[bot]
+
## v3.3.1 (2022-03-18)
* [76ff748d4](https://github.com/argoproj/argo-workflows/commit/76ff748d41c67e1a38ace1352ca3bab8d7ec8a39) feat: add TLS config option to HTTP template. Fixes #7390 (#7929)
@@ -444,6 +1172,22 @@
* zorulo
* 大雄
+## v3.2.11 (2022-05-03)
+
+ * [8faf269a7](https://github.com/argoproj/argo-workflows/commit/8faf269a795c0c9cc251152f9e4db4cd49234e52) fix: Remove binaries from Windows image. Fixes #8417 (#8420)
+
+### Contributors
+
+ * Markus Lippert
+
+## v3.2.10 (2022-05-03)
+
+ * [877216e21](https://github.com/argoproj/argo-workflows/commit/877216e2159f07bfb27aa1991aa249bc2e9a250c) fix: Added artifact Content-Security-Policy (#8585)
+
+### Contributors
+
+ * Alex Collins
+
## v3.2.9 (2022-03-02)
* [ce91d7b1d](https://github.com/argoproj/argo-workflows/commit/ce91d7b1d0115d5c73f6472dca03ddf5cc2c98f4) fix(controller): fix pod stuck in running when using podSpecPatch and emissary (#7407)
diff --git a/Dockerfile b/Dockerfile
index 76fa22175e07..7ad207b20e97 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,12 +1,6 @@
#syntax=docker/dockerfile:1.2
-ARG DOCKER_CHANNEL=stable
-ARG DOCKER_VERSION=20.10.12
-# NOTE: kubectl version should be one minor version less than https://storage.googleapis.com/kubernetes-release/release/stable.txt
-ARG KUBECTL_VERSION=1.22.3
-ARG JQ_VERSION=1.6
-
-FROM golang:1.17 as builder
+FROM golang:1.18 as builder
RUN apt-get update && apt-get --no-install-recommends install -y \
git \
@@ -15,9 +9,7 @@ RUN apt-get update && apt-get --no-install-recommends install -y \
apt-transport-https \
ca-certificates \
wget \
- gcc \
- libcap2-bin \
- zip && \
+ gcc && \
apt-get clean \
&& rm -rf \
/var/lib/apt/lists/* \
@@ -37,33 +29,6 @@ RUN go mod download
COPY . .
-####################################################################################################
-
-FROM alpine:3 as argoexec-base
-
-ARG DOCKER_CHANNEL
-ARG DOCKER_VERSION
-ARG KUBECTL_VERSION
-
-RUN apk --no-cache add curl procps git tar libcap jq
-
-COPY hack/arch.sh hack/os.sh /bin/
-
-RUN if [ $(arch.sh) = ppc64le ] || [ $(arch.sh) = s390x ]; then \
- curl -o docker.tgz https://download.docker.com/$(os.sh)/static/${DOCKER_CHANNEL}/$(uname -m)/docker-18.06.3-ce.tgz; \
- else \
- curl -o docker.tgz https://download.docker.com/$(os.sh)/static/${DOCKER_CHANNEL}/$(uname -m)/docker-${DOCKER_VERSION}.tgz; \
- fi && \
- tar --extract --file docker.tgz --strip-components 1 --directory /usr/local/bin/ && \
- rm docker.tgz
-RUN curl -o /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/$(os.sh)/$(arch.sh)/kubectl && \
- chmod +x /usr/local/bin/kubectl
-RUN rm /bin/arch.sh /bin/os.sh
-
-COPY hack/ssh_known_hosts /etc/ssh/
-COPY hack/nsswitch.conf /etc/
-
-
####################################################################################################
FROM node:16 as argo-ui
@@ -81,13 +46,21 @@ RUN NODE_OPTIONS="--max-old-space-size=2048" JOBS=max yarn --cwd ui build
FROM builder as argoexec-build
+COPY hack/arch.sh hack/os.sh /bin/
+
+# NOTE: kubectl version should be one minor version less than https://storage.googleapis.com/kubernetes-release/release/stable.txt
+RUN curl -o /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.22.3/bin/$(os.sh)/$(arch.sh)/kubectl && \
+ chmod +x /usr/local/bin/kubectl
+
+RUN curl -o /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && \
+ chmod +x /usr/local/bin/jq
+
# Tell git to forget about all of the files that were not included because of .dockerignore in order to ensure that
# the git state is "clean" even though said .dockerignore files are not present
RUN cat .dockerignore >> .gitignore
RUN git status --porcelain | cut -c4- | xargs git update-index --skip-worktree
RUN --mount=type=cache,target=/root/.cache/go-build make dist/argoexec
-RUN setcap CAP_SYS_PTRACE,CAP_SYS_CHROOT+ei dist/argoexec
####################################################################################################
@@ -119,27 +92,32 @@ RUN --mount=type=cache,target=/root/.cache/go-build make dist/argo
####################################################################################################
-FROM argoexec-base as argoexec
+FROM gcr.io/distroless/static as argoexec
-COPY --from=argoexec-build /go/src/github.com/argoproj/argo-workflows/dist/argoexec /usr/local/bin/
+COPY --from=argoexec-build /usr/local/bin/kubectl /bin/
+COPY --from=argoexec-build /usr/local/bin/jq /bin/
+COPY --from=argoexec-build /go/src/github.com/argoproj/argo-workflows/dist/argoexec /bin/
COPY --from=argoexec-build /etc/mime.types /etc/mime.types
+COPY hack/ssh_known_hosts /etc/ssh/
+COPY hack/nsswitch.conf /etc/
ENTRYPOINT [ "argoexec" ]
####################################################################################################
-FROM scratch as workflow-controller
+FROM gcr.io/distroless/static as workflow-controller
USER 8737
-COPY --chown=8737 --from=workflow-controller-build /usr/share/zoneinfo /usr/share/zoneinfo
+COPY hack/ssh_known_hosts /etc/ssh/
+COPY hack/nsswitch.conf /etc/
COPY --chown=8737 --from=workflow-controller-build /go/src/github.com/argoproj/argo-workflows/dist/workflow-controller /bin/
ENTRYPOINT [ "workflow-controller" ]
####################################################################################################
-FROM scratch as argocli
+FROM gcr.io/distroless/static as argocli
USER 8737
@@ -147,7 +125,6 @@ WORKDIR /home/argo
COPY hack/ssh_known_hosts /etc/ssh/
COPY hack/nsswitch.conf /etc/
-COPY --from=argocli-build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=argocli-build /go/src/github.com/argoproj/argo-workflows/dist/argo /bin/
ENTRYPOINT [ "argo" ]
diff --git a/Dockerfile.windows b/Dockerfile.windows
index 2840066c04c7..fcdc1aa06e55 100644
--- a/Dockerfile.windows
+++ b/Dockerfile.windows
@@ -8,7 +8,7 @@ ARG IMAGE_OS_VERSION=1809
# had issues with official golange image for windows so I'm using plain servercore
FROM mcr.microsoft.com/windows/servercore:${IMAGE_OS_VERSION} as builder
-ENV GOLANG_VERSION=1.17
+ENV GOLANG_VERSION=1.18
SHELL ["powershell", "-Command"]
# install chocolatey package manager
@@ -19,7 +19,7 @@ RUN iex ((new-object net.webclient).DownloadString('https://chocolatey.org/insta
# install golang, dep and other tools
RUN choco install golang --version=$env:GOLANG_VERSION ; \
- choco install make dep docker-cli git.portable 7zip.portable
+ choco install make dep git.portable 7zip.portable
####################################################################################################
# argoexec-base
@@ -36,13 +36,11 @@ RUN mkdir C:\app && \
curl -L -o C:\app\kubectl.exe "https://storage.googleapis.com/kubernetes-release/release/v%KUBECTL_VERSION%/bin/windows/amd64/kubectl.exe" && \
curl -L -o C:\app\jq.exe "https://github.com/stedolan/jq/releases/download/jq-%JQ_VERSION%/jq-win64.exe"
-COPY --from=builder C:/ProgramData/chocolatey/lib/docker-cli/tools/docker/docker.exe C:/app/docker.exe
-COPY --from=builder C:/tools/git C:/app/git
COPY --from=builder C:/ProgramData/chocolatey/lib/7zip.portable/tools/7z-extra/x64/7za.exe C:/app/7za.exe
# add binaries to path
USER Administrator
-RUN SETX /m path C:\app;C:\app\git\bin;%path%
+RUN SETX /m path C:\app;%path%
####################################################################################################
# Argo Build stage which performs the actual build of Argo binaries
diff --git a/Makefile b/Makefile
index 2eaecfc8da20..c032c0b414c5 100644
--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,7 @@ SRC := $(GOPATH)/src/github.com/argoproj/argo-workflows
GREP_LOGS := ""
+
# docker image publishing options
IMAGE_NAMESPACE ?= quay.io/argoproj
DEV_IMAGE ?= $(shell [ `uname -s` = Darwin ] && echo true || echo false)
@@ -28,6 +29,12 @@ K3D_CLUSTER_NAME ?= k3s-default
KUBE_NAMESPACE ?= argo
MANAGED_NAMESPACE ?= $(KUBE_NAMESPACE)
+# Timeout for wait conditions
+E2E_WAIT_TIMEOUT ?= 1m
+
+E2E_PARALLEL ?= 20
+E2E_SUITE_TIMEOUT ?= 15m
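+# e.g. (illustrative) on a slower machine you might override these: make test-functional E2E_SUITE_TIMEOUT=25m E2E_PARALLEL=10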
+
VERSION := latest
DOCKER_PUSH := false
@@ -44,10 +51,15 @@ else
STATIC_FILES ?= $(shell [ $(DEV_BRANCH) = true ] && echo false || echo true)
endif
-UI ?= false
+# start the Controller
+CTRL ?= true
+# tail logs
+LOGS ?= $(CTRL)
+# start the UI
+UI ?= $(shell [ $(CTRL) = true ] && echo false || echo true)
# start the Argo Server
API ?= $(UI)
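+# e.g. (illustrative) `make start UI=true` also starts the Argo Server, because API defaults to $(UI)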
-GOTEST ?= go test -v
+GOTEST ?= go test -v -p 20
PROFILE ?= minimal
PLUGINS ?= $(shell [ $PROFILE = plugins ] && echo false || echo true)
# by keeping this short we speed up the tests
@@ -58,6 +70,8 @@ AUTH_MODE := hybrid
ifeq ($(PROFILE),sso)
AUTH_MODE := sso
endif
+# whether or not to start the Azurite test service for Azure Blob Storage
+AZURE := false
# Which mode to run in:
# * `local` run the workflow–controller and argo-server as single replicas on the local machine (default)
@@ -80,7 +94,7 @@ ALWAYS_OFFLOAD_NODE_STATUS := false
$(info GIT_COMMIT=$(GIT_COMMIT) GIT_BRANCH=$(GIT_BRANCH) GIT_TAG=$(GIT_TAG) GIT_TREE_STATE=$(GIT_TREE_STATE) RELEASE_TAG=$(RELEASE_TAG) DEV_BRANCH=$(DEV_BRANCH) VERSION=$(VERSION))
$(info KUBECTX=$(KUBECTX) DOCKER_DESKTOP=$(DOCKER_DESKTOP) K3D=$(K3D) DOCKER_PUSH=$(DOCKER_PUSH))
-$(info RUN_MODE=$(RUN_MODE) PROFILE=$(PROFILE) AUTH_MODE=$(AUTH_MODE) SECURE=$(SECURE) STATIC_FILES=$(STATIC_FILES) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) LOG_LEVEL=$(LOG_LEVEL) NAMESPACED=$(NAMESPACED))
+$(info RUN_MODE=$(RUN_MODE) PROFILE=$(PROFILE) AUTH_MODE=$(AUTH_MODE) SECURE=$(SECURE) STATIC_FILES=$(STATIC_FILES) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) LOG_LEVEL=$(LOG_LEVEL) NAMESPACED=$(NAMESPACED) AZURE=$(AZURE))
override LDFLAGS += \
-X github.com/argoproj/argo-workflows/v3.version=$(VERSION) \
@@ -100,7 +114,6 @@ endif
ARGOEXEC_PKGS := $(shell echo cmd/argoexec && go list -f '{{ join .Deps "\n" }}' ./cmd/argoexec/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-)
CLI_PKGS := $(shell echo cmd/argo && go list -f '{{ join .Deps "\n" }}' ./cmd/argo/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-)
CONTROLLER_PKGS := $(shell echo cmd/workflow-controller && go list -f '{{ join .Deps "\n" }}' ./cmd/workflow-controller/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-)
-E2E_EXECUTOR ?= emissary
TYPES := $(shell find pkg/apis/workflow/v1alpha1 -type f -name '*.go' -not -name openapi_generated.go -not -name '*generated*' -not -name '*test.go')
CRDS := $(shell find manifests/base/crds -type f -name 'argoproj.io_*.yaml')
SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \
@@ -110,12 +123,11 @@ SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \
pkg/apiclient/event/event.swagger.json \
pkg/apiclient/eventsource/eventsource.swagger.json \
pkg/apiclient/info/info.swagger.json \
- pkg/apiclient/pipeline/pipeline.swagger.json \
pkg/apiclient/sensor/sensor.swagger.json \
pkg/apiclient/workflow/workflow.swagger.json \
pkg/apiclient/workflowarchive/workflow-archive.swagger.json \
pkg/apiclient/workflowtemplate/workflow-template.swagger.json
-PROTO_BINARIES := $(GOPATH)/bin/protoc-gen-gogo $(GOPATH)/bin/protoc-gen-gogofast $(GOPATH)/bin/goimports $(GOPATH)/bin/protoc-gen-grpc-gateway $(GOPATH)/bin/protoc-gen-swagger
+PROTO_BINARIES := $(GOPATH)/bin/protoc-gen-gogo $(GOPATH)/bin/protoc-gen-gogofast $(GOPATH)/bin/goimports $(GOPATH)/bin/protoc-gen-grpc-gateway $(GOPATH)/bin/protoc-gen-swagger /usr/local/bin/clang-format
# protoc,my.proto
define protoc
@@ -136,12 +148,6 @@ define protoc
endef
-.PHONY: build
-build: clis images
-
-.PHONY: images
-images: argocli-image argoexec-image workflow-controller-image
-
# cli
.PHONY: cli
@@ -167,36 +173,37 @@ server/static/files.go:
endif
dist/argo-linux-amd64: GOARGS = GOOS=linux GOARCH=amd64
-dist/argo-darwin-amd64: GOARGS = GOOS=darwin GOARCH=amd64
-dist/argo-windows-amd64: GOARGS = GOOS=windows GOARCH=amd64
dist/argo-linux-arm64: GOARGS = GOOS=linux GOARCH=arm64
dist/argo-linux-ppc64le: GOARGS = GOOS=linux GOARCH=ppc64le
dist/argo-linux-s390x: GOARGS = GOOS=linux GOARCH=s390x
+dist/argo-darwin-amd64: GOARGS = GOOS=darwin GOARCH=amd64
+dist/argo-darwin-arm64: GOARGS = GOOS=darwin GOARCH=arm64
+dist/argo-windows-amd64: GOARGS = GOOS=windows GOARCH=amd64
dist/argo-windows-%.gz: dist/argo-windows-%
gzip --force --keep dist/argo-windows-$*.exe
dist/argo-windows-%: server/static/files.go $(CLI_PKGS) go.sum
- CGO_ENABLED=0 $(GOARGS) go build -v -ldflags '${LDFLAGS} -extldflags -static' -o $@.exe ./cmd/argo
+ CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@.exe ./cmd/argo
dist/argo-%.gz: dist/argo-%
gzip --force --keep dist/argo-$*
dist/argo-%: server/static/files.go $(CLI_PKGS) go.sum
- CGO_ENABLED=0 $(GOARGS) go build -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo
+ CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo
dist/argo: server/static/files.go $(CLI_PKGS) go.sum
ifeq ($(shell uname -s),Darwin)
# if local, then build fast: use CGO and dynamic-linking
- go build -v -ldflags '${LDFLAGS}' -o $@ ./cmd/argo
+ go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS}' -o $@ ./cmd/argo
else
- CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo
+ CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo
endif
argocli-image:
.PHONY: clis
-clis: dist/argo-linux-amd64.gz dist/argo-linux-arm64.gz dist/argo-linux-ppc64le.gz dist/argo-linux-s390x.gz dist/argo-darwin-amd64.gz dist/argo-windows-amd64.gz
+clis: dist/argo-linux-amd64.gz dist/argo-linux-arm64.gz dist/argo-linux-ppc64le.gz dist/argo-linux-s390x.gz dist/argo-darwin-amd64.gz dist/argo-darwin-arm64.gz dist/argo-windows-amd64.gz
# controller
@@ -206,9 +213,9 @@ controller: dist/workflow-controller
dist/workflow-controller: $(CONTROLLER_PKGS) go.sum
ifeq ($(shell uname -s),Darwin)
# if local, then build fast: use CGO and dynamic-linking
- go build -v -ldflags '${LDFLAGS}' -o $@ ./cmd/workflow-controller
+ go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS}' -o $@ ./cmd/workflow-controller
else
- CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/workflow-controller
+ CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/workflow-controller
endif
workflow-controller-image:
@@ -217,9 +224,9 @@ workflow-controller-image:
dist/argoexec: $(ARGOEXEC_PKGS) go.sum
ifeq ($(shell uname -s),Darwin)
- CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec
+ CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec
else
- CGO_ENABLED=0 go build -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec
+ CGO_ENABLED=0 go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec
endif
argoexec-image:
@@ -236,7 +243,8 @@ argoexec-image:
if [ $(DOCKER_PUSH) = true ] && [ $(IMAGE_NAMESPACE) != argoproj ] ; then docker push $(IMAGE_NAMESPACE)/$*:$(VERSION) ; fi
.PHONY: codegen
-codegen: types swagger docs manifests
+codegen: types swagger manifests $(GOPATH)/bin/mockery docs/fields.md docs/cli/argo.md
+ go generate ./...
make --directory sdks/java generate
make --directory sdks/python generate
@@ -258,7 +266,6 @@ swagger: \
pkg/apiclient/eventsource/eventsource.swagger.json \
pkg/apiclient/info/info.swagger.json \
pkg/apiclient/sensor/sensor.swagger.json \
- pkg/apiclient/pipeline/pipeline.swagger.json \
pkg/apiclient/workflow/workflow.swagger.json \
pkg/apiclient/workflowarchive/workflow-archive.swagger.json \
pkg/apiclient/workflowtemplate/workflow-template.swagger.json \
@@ -267,19 +274,9 @@ swagger: \
api/openapi-spec/swagger.json \
api/jsonschema/schema.json
-.PHONY: docs
-docs: \
- docs/fields.md \
- docs/cli/argo.md \
- $(GOPATH)/bin/mockery
- rm -Rf vendor v3
- go mod tidy
- # `go generate ./...` takes around 10s, so we only run on specific packages.
- go generate ./persist/sqldb ./pkg/plugins ./pkg/apiclient/workflow ./server/auth ./server/auth/sso ./workflow/executor
- ./hack/check-env-doc.sh
$(GOPATH)/bin/mockery:
- go install github.com/vektra/mockery/v2@v2.9.4
+ go install github.com/vektra/mockery/v2@v2.10.0
$(GOPATH)/bin/controller-gen:
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1
$(GOPATH)/bin/go-to-protobuf:
@@ -301,9 +298,19 @@ $(GOPATH)/bin/swagger:
$(GOPATH)/bin/goimports:
go install golang.org/x/tools/cmd/goimports@v0.1.7
+/usr/local/bin/clang-format:
+ifeq ($(shell uname),Darwin)
+ brew install clang-format
+else
+ sudo apt-get install clang-format
+endif
+
pkg/apis/workflow/v1alpha1/generated.proto: $(GOPATH)/bin/go-to-protobuf $(PROTO_BINARIES) $(TYPES) $(GOPATH)/src/github.com/gogo/protobuf
# These files are generated on a v3/ folder by the tool. Link them to the root folder
[ -e ./v3 ] || ln -s . v3
+	# Format proto files. Formatting changes generated code, so we do it here, rather than at lint time.
+ # Why clang-format? Google uses it.
+ find pkg/apiclient -name '*.proto'|xargs clang-format -i
$(GOPATH)/bin/go-to-protobuf \
--go-header-file=./hack/custom-boilerplate.go.txt \
--packages=github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \
@@ -333,9 +340,6 @@ pkg/apiclient/info/info.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/i
pkg/apiclient/sensor/sensor.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/sensor/sensor.proto
$(call protoc,pkg/apiclient/sensor/sensor.proto)
-pkg/apiclient/pipeline/pipeline.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/pipeline/pipeline.proto
- $(call protoc,pkg/apiclient/pipeline/pipeline.proto)
-
pkg/apiclient/workflow/workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflow/workflow.proto
$(call protoc,pkg/apiclient/workflow/workflow.proto)
@@ -349,6 +353,7 @@ pkg/apiclient/workflowtemplate/workflow-template.swagger.json: $(PROTO_BINARIES)
manifests/base/crds/full/argoproj.io_workflows.yaml: $(GOPATH)/bin/controller-gen $(TYPES) ./hack/crdgen.sh ./hack/crds.go
./hack/crdgen.sh
+.PHONY: manifests
manifests: \
manifests/install.yaml \
manifests/namespace-install.yaml \
@@ -361,14 +366,23 @@ manifests: \
dist/manifests/quick-start-mysql.yaml \
dist/manifests/quick-start-postgres.yaml
+.PHONY: manifests/install.yaml
manifests/install.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/cluster-install | ./hack/auto-gen-msg.sh > manifests/install.yaml
+
+.PHONY: manifests/namespace-install.yaml
manifests/namespace-install.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/namespace-install | ./hack/auto-gen-msg.sh > manifests/namespace-install.yaml
+
+.PHONY: manifests/quick-start-minimal.yaml
manifests/quick-start-minimal.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/minimal | ./hack/auto-gen-msg.sh > manifests/quick-start-minimal.yaml
+
+.PHONY: manifests/quick-start-mysql.yaml
manifests/quick-start-mysql.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/mysql | ./hack/auto-gen-msg.sh > manifests/quick-start-mysql.yaml
+
+.PHONY: manifests/quick-start-postgres.yaml
manifests/quick-start-postgres.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/postgres | ./hack/auto-gen-msg.sh > manifests/quick-start-postgres.yaml
@@ -379,33 +393,37 @@ dist/manifests/%: manifests/%
# lint/test/etc
$(GOPATH)/bin/golangci-lint:
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.42.0
+ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.47.1
.PHONY: lint
lint: server/static/files.go $(GOPATH)/bin/golangci-lint
rm -Rf v3 vendor
+ # If you're using `woc.wf.Spec` or `woc.execWf.Status` your code probably won't work with WorkflowTemplate.
+ # * Change `woc.wf.Spec` to `woc.execWf.Spec`.
+ # * Change `woc.execWf.Status` to `woc.wf.Status`.
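+	# e.g. (illustrative) `woc.wf.Spec.Templates` becomes `woc.execWf.Spec.Templates`, and `woc.execWf.Status.Nodes` becomes `woc.wf.Status.Nodes`.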
+ @awk '(/woc.wf.Spec/ || /woc.execWf.Status/) && !/not-woc-misuse/ {print FILENAME ":" FNR "\t" $0 ; exit 1}' $(shell find workflow/controller -type f -name '*.go' -not -name '*test*')
# Tidy Go modules
go mod tidy
# Lint Go files
$(GOPATH)/bin/golangci-lint run --fix --verbose
+ # Lint the UI
+ if [ -e ui/node_modules ]; then yarn --cwd ui lint ; fi
# for local we have a faster target that prints to stdout, does not use json, and can cache because it has no coverage
.PHONY: test
-test: server/static/files.go dist/argosay
+test: server/static/files.go
go build ./...
env KUBECONFIG=/dev/null $(GOTEST) ./...
+	# marker file; based on its modification time, we know how long ago this target was run
+ @mkdir -p dist
+ touch dist/test
.PHONY: install
install: githooks
kubectl get ns $(KUBE_NAMESPACE) || kubectl create ns $(KUBE_NAMESPACE)
kubectl config set-context --current --namespace=$(KUBE_NAMESPACE)
- @echo "installing PROFILE=$(PROFILE), E2E_EXECUTOR=$(E2E_EXECUTOR)"
+ @echo "installing PROFILE=$(PROFILE)"
kubectl kustomize --load-restrictor=LoadRestrictionsNone test/e2e/manifests/$(PROFILE) | sed 's|quay.io/argoproj/|$(IMAGE_NAMESPACE)/|' | sed 's/namespace: argo/namespace: $(KUBE_NAMESPACE)/' | kubectl -n $(KUBE_NAMESPACE) apply --prune -l app.kubernetes.io/part-of=argo -f -
-ifneq ($(E2E_EXECUTOR),emissary)
- # only change the executor from the default it we need to
- kubectl patch cm/workflow-controller-configmap -p "{\"data\": {\"containerRuntimeExecutor\": \"$(E2E_EXECUTOR)\"}}"
- kubectl apply -f manifests/quick-start/base/executor/$(E2E_EXECUTOR)
-endif
ifeq ($(PROFILE),stress)
kubectl -n $(KUBE_NAMESPACE) apply -f test/stress/massive-workflow.yaml
endif
@@ -413,6 +431,9 @@ ifeq ($(RUN_MODE),kubernetes)
kubectl -n $(KUBE_NAMESPACE) scale deploy/workflow-controller --replicas 1
kubectl -n $(KUBE_NAMESPACE) scale deploy/argo-server --replicas 1
endif
+ifeq ($(AZURE),true)
+ kubectl -n $(KUBE_NAMESPACE) apply -f test/e2e/azure/deploy-azurite.yaml
+endif
.PHONY: argosay
argosay:
@@ -428,17 +449,8 @@ dist/argosay:
mkdir -p dist
cp test/e2e/images/argosay/v2/argosay dist/
-.PHONY: pull-images
-pull-images:
- docker pull golang:1.17
- docker pull debian:10.7-slim
- docker pull mysql:8
- docker pull argoproj/argosay:v1
- docker pull argoproj/argosay:v2
- docker pull python:alpine3.6
-
$(GOPATH)/bin/goreman:
- go install github.com/mattn/goreman@v0.3.7
+ go install github.com/mattn/goreman@v0.3.11
.PHONY: start
ifeq ($(RUN_MODE),local)
@@ -450,7 +462,13 @@ endif
else
start: install
endif
- @echo "starting STATIC_FILES=$(STATIC_FILES) (DEV_BRANCH=$(DEV_BRANCH), GIT_BRANCH=$(GIT_BRANCH)), AUTH_MODE=$(AUTH_MODE), RUN_MODE=$(RUN_MODE), MANAGED_NAMESPACE=$(MANAGED_NAMESPACE)"
+ @echo "starting STATIC_FILES=$(STATIC_FILES) (DEV_BRANCH=$(DEV_BRANCH), GIT_BRANCH=$(GIT_BRANCH)), AUTH_MODE=$(AUTH_MODE), RUN_MODE=$(RUN_MODE), MANAGED_NAMESPACE=$(MANAGED_NAMESPACE), AZURE=$(AZURE)"
+ifneq ($(CTRL),true)
+ @echo "⚠️️ not starting controller. If you want to test the controller, use 'make start CTRL=true' to start it"
+endif
+ifneq ($(LOGS),true)
+	@echo "⚠️️ not tailing logs. If you want to tail logs, use 'make start LOGS=true'"
+endif
ifneq ($(API),true)
@echo "⚠️️ not starting API. If you want to test the API, use 'make start API=true' to start it"
endif
@@ -462,22 +480,25 @@ ifneq ($(PLUGINS),true)
endif
# Check dex, minio, postgres and mysql are in hosts file
ifeq ($(AUTH_MODE),sso)
- grep '127.0.0.1[[:blank:]]*dex' /etc/hosts
+ grep '127.0.0.1.*dex' /etc/hosts
endif
- grep '127.0.0.1[[:blank:]]*minio' /etc/hosts
- grep '127.0.0.1[[:blank:]]*postgres' /etc/hosts
- grep '127.0.0.1[[:blank:]]*mysql' /etc/hosts
+ifeq ($(AZURE),true)
+ grep '127.0.0.1.*azurite' /etc/hosts
+endif
+ grep '127.0.0.1.*minio' /etc/hosts
+ grep '127.0.0.1.*postgres' /etc/hosts
+ grep '127.0.0.1.*mysql' /etc/hosts
./hack/port-forward.sh
ifeq ($(RUN_MODE),local)
- env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) LOG_LEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) VERSION=$(VERSION) AUTH_MODE=$(AUTH_MODE) NAMESPACED=$(NAMESPACED) NAMESPACE=$(KUBE_NAMESPACE) MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) UI=$(UI) API=$(API) PLUGINS=$(PLUGINS) $(GOPATH)/bin/goreman -set-ports=false -logtime=false start $(shell if [ -z $GREP_LOGS ]; then echo; else echo "| grep \"$(GREP_LOGS)\""; fi)
+ env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) LOG_LEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) VERSION=$(VERSION) AUTH_MODE=$(AUTH_MODE) NAMESPACED=$(NAMESPACED) NAMESPACE=$(KUBE_NAMESPACE) MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) CTRL=$(CTRL) LOGS=$(LOGS) UI=$(UI) API=$(API) PLUGINS=$(PLUGINS) $(GOPATH)/bin/goreman -set-ports=false -logtime=false start $(shell if [ -z $GREP_LOGS ]; then echo; else echo "| grep \"$(GREP_LOGS)\""; fi)
endif
$(GOPATH)/bin/stern:
- ./hack/recurl.sh $(GOPATH)/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_`uname -s|tr '[:upper:]' '[:lower:]'`_amd64
+ go install github.com/stern/stern@latest
.PHONY: logs
logs: $(GOPATH)/bin/stern
- stern -l workflows.argoproj.io/workflow 2>&1
+ $(GOPATH)/bin/stern -l workflows.argoproj.io/workflow 2>&1
.PHONY: wait
wait:
@@ -499,12 +520,20 @@ mysql-cli:
test-cli: ./dist/argo
test-%:
- go test -v -timeout 15m -count 1 --tags $* -parallel 10 ./test/e2e
+ go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags $* -parallel $(E2E_PARALLEL) ./test/e2e
.PHONY: test-examples
test-examples:
./hack/test-examples.sh
+.PHONY: test-%-sdk
+test-%-sdk:
+ make --directory sdks/$* install test -B
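+# e.g. (illustrative) `make test-java-sdk` runs `make --directory sdks/java install test -B`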
+
+Test%:
+ go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags api,cli,cron,executor,examples,corefunctional,functional,plugins -parallel $(E2E_PARALLEL) ./test/e2e -run='.*/$*'
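+# e.g. (illustrative) `make TestFooBar` runs only the e2e tests whose names match `FooBar` (a hypothetical test name)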
+
+
# clean
.PHONY: clean
@@ -583,24 +612,74 @@ docs/fields.md: api/openapi-spec/swagger.json $(shell find examples -type f) hac
docs/cli/argo.md: $(CLI_PKGS) go.sum server/static/files.go hack/cli/main.go
go run ./hack/cli
-# pre-push
+# docs
+
+/usr/local/bin/mdspell:
+ npm i -g markdown-spellcheck
+
+.PHONY: docs-spellcheck
+docs-spellcheck: /usr/local/bin/mdspell
+ # check docs for spelling mistakes
+	mdspell --ignore-numbers --ignore-acronyms --en-us --no-suggestions --report $(shell find docs -name '*.md' -not -name upgrading.md -not -name fields.md -not -name executor_swagger.md -not -path '*/cli/*')
+
+/usr/local/bin/markdown-link-check:
+ npm i -g markdown-link-check
+
+.PHONY: docs-linkcheck
+docs-linkcheck: /usr/local/bin/markdown-link-check
+ # check docs for broken links
+ markdown-link-check -q -c .mlc_config.json $(shell find docs -name '*.md' -not -name fields.md -not -name executor_swagger.md)
+
+/usr/local/bin/markdownlint:
+ npm i -g markdownlint-cli
-.git/hooks/commit-msg: hack/git/hooks/commit-msg
- cp -v hack/git/hooks/commit-msg .git/hooks/commit-msg
+.PHONY: docs-lint
+docs-lint: /usr/local/bin/markdownlint
+ # lint docs
+ markdownlint docs --fix --ignore docs/fields.md --ignore docs/executor_swagger.md --ignore docs/cli --ignore docs/walk-through/the-structure-of-workflow-specs.md
+
+/usr/local/bin/mkdocs:
+ python -m pip install mkdocs==1.2.4 mkdocs_material==8.1.9 mkdocs-spellcheck==0.2.1
+
+.PHONY: docs
+docs: /usr/local/bin/mkdocs \
+ docs-spellcheck \
+ docs-lint \
+ docs-linkcheck
+ # check environment-variables.md contains all variables mentioned in the code
+ ./hack/check-env-doc.sh
+ # check all docs are listed in mkdocs.yml
+ ./hack/check-mkdocs.sh
+ # build the docs
+ mkdocs build
+ # fix the fields.md document
+ go run -tags fields ./hack parseexamples
+ # tell the user the fastest way to edit docs
+	@echo "ℹ️ If you want to preview your docs, open site/index.html. If you want to edit them with hot-reload, run 'make docs-serve' to start mkdocs on port 8000"
+
+.PHONY: docs-serve
+docs-serve: docs
+ mkdocs serve
+
+# pre-commit checks
+
+.git/hooks/%: hack/git/hooks/%
+ @mkdir -p .git/hooks
+ cp hack/git/hooks/$* .git/hooks/$*
.PHONY: githooks
-githooks: .git/hooks/commit-msg
+githooks: .git/hooks/pre-commit .git/hooks/commit-msg
.PHONY: pre-commit
-pre-commit: githooks codegen lint
+pre-commit: codegen lint docs
+	# marker file; based on its modification time, we know how long ago this target was run
+ touch dist/pre-commit
+
+# release
release-notes: /dev/null
version=$(VERSION) envsubst < hack/release-notes.md > release-notes
-.PHONY: parse-examples
-parse-examples:
- go run -tags fields ./hack parseexamples
-
.PHONY: checksums
checksums:
for f in ./dist/argo-*.gz; do openssl dgst -sha256 "$$f" | awk ' { print $$2 }' > "$$f".sha256 ; done
diff --git a/OWNERS b/OWNERS
index 43af4181e280..1b1c0889d153 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,7 +1,9 @@
owners:
- alexec
+- sarabala1979
reviewers:
+- tczhao
- xianlubird
approvers:
@@ -10,6 +12,5 @@ approvers:
- dtaniwaki
- edlee2121
- jessesuen
-- sarabala1979
- simster7
- terrytangyuan
diff --git a/Procfile b/Procfile
index 7f136708af75..bdf3714af7bd 100644
--- a/Procfile
+++ b/Procfile
@@ -1,4 +1,4 @@
-controller: ./hack/free-port.sh 9090 && ARGO_EXECUTOR_PLUGINS=${PLUGINS} ARGO_REMOVE_PVC_PROTECTION_FINALIZER=true ARGO_PROGRESS_PATCH_TICK_DURATION=7s DEFAULT_REQUEUE_TIME=${DEFAULT_REQUEUE_TIME} LEADER_ELECTION_IDENTITY=local ALWAYS_OFFLOAD_NODE_STATUS=${ALWAYS_OFFLOAD_NODE_STATUS} OFFLOAD_NODE_STATUS_TTL=30s WORKFLOW_GC_PERIOD=30s UPPERIO_DB_DEBUG=${UPPERIO_DB_DEBUG} ARCHIVED_WORKFLOW_GC_PERIOD=30s ./dist/workflow-controller --executor-image ${IMAGE_NAMESPACE}/argoexec:${VERSION} --namespaced=${NAMESPACED} --namespace ${NAMESPACE} --managed-namespace=${MANAGED_NAMESPACE} --loglevel ${LOG_LEVEL}
-argo-server: ./hack/free-port.sh 2746 && [ "$API" = "true" ] && UPPERIO_DB_DEBUG=${UPPERIO_DB_DEBUG} ./dist/argo --loglevel ${LOG_LEVEL} server --namespaced=${NAMESPACED} --namespace ${NAMESPACE} --auth-mode ${AUTH_MODE} --secure=$SECURE --x-frame-options=SAMEORIGIN
-ui: ./hack/free-port.sh 8080 && [ "$UI" = "true" ] && yarn --cwd ui install && yarn --cwd ui start
-logs: make logs
\ No newline at end of file
+controller: [ "$CTRL" = "true" ] && ./hack/free-port.sh 9090 && ARGO_EXECUTOR_PLUGINS=${PLUGINS} ARGO_REMOVE_PVC_PROTECTION_FINALIZER=true ARGO_PROGRESS_PATCH_TICK_DURATION=7s DEFAULT_REQUEUE_TIME=${DEFAULT_REQUEUE_TIME} LEADER_ELECTION_IDENTITY=local ALWAYS_OFFLOAD_NODE_STATUS=${ALWAYS_OFFLOAD_NODE_STATUS} OFFLOAD_NODE_STATUS_TTL=30s WORKFLOW_GC_PERIOD=30s UPPERIO_DB_DEBUG=${UPPERIO_DB_DEBUG} ARCHIVED_WORKFLOW_GC_PERIOD=30s ./dist/workflow-controller --executor-image ${IMAGE_NAMESPACE}/argoexec:${VERSION} --namespaced=${NAMESPACED} --namespace ${NAMESPACE} --managed-namespace=${MANAGED_NAMESPACE} --loglevel ${LOG_LEVEL}
+argo-server: [ "$API" = "true" ] && ./hack/free-port.sh 2746 && UPPERIO_DB_DEBUG=${UPPERIO_DB_DEBUG} ./dist/argo --loglevel ${LOG_LEVEL} server --namespaced=${NAMESPACED} --namespace ${NAMESPACE} --auth-mode ${AUTH_MODE} --secure=$SECURE --x-frame-options=SAMEORIGIN
+ui: [ "$UI" = "true" ] && ./hack/free-port.sh 8080 && yarn --cwd ui install && yarn --cwd ui start
+logs: [ "$LOGS" = "true" ] && make logs
\ No newline at end of file
diff --git a/README.md b/README.md
index 58fb44bd29e3..3034c42f9c04 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,4 @@
-[](https://argoproj.github.io/community/join-slack)
-[](https://github.com/argoproj/argo-workflows/actions?query=event%3Apush+branch%3Amaster)
-[](https://bestpractices.coreinfrastructure.org/projects/3830)
-[](https://twitter.com/argoproj)
+[](https://argoproj.github.io/community/join-slack) [](https://github.com/argoproj/argo-workflows/actions?query=event%3Apush+branch%3Amaster) [](https://bestpractices.coreinfrastructure.org/projects/3830) [](https://twitter.com/argoproj)
## What is Argo Workflows?
@@ -43,30 +40,25 @@ Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) hosted pr

+## Documentation
+
+[View the docs](https://argoproj.github.io/argo-workflows/)
+
## Ecosystem
-
-
-[Argo Events](https://github.com/argoproj/argo-events)
-|
-[Argo Workflows Catalog](https://argoproj-labs.github.io/argo-workflows-catalog/)
-|
-[Couler](https://github.com/couler-proj/couler)
-|
-[Katib](https://github.com/kubeflow/katib)
-|
-[Kedro](https://kedro.readthedocs.io/en/stable/)
-|
-[Kubeflow Pipelines](https://github.com/kubeflow/pipelines)
-|
-[Onepanel](https://www.onepanel.ai/)
-|
-[Ploomber](https://github.com/ploomber/ploomber)
-|
-[Seldon](https://github.com/SeldonIO/seldon-core)
-|
-[SQLFlow](https://github.com/sql-machine-learning/sqlflow)
+Just some of the projects that use or rely on Argo Workflows:
+* [Argo Events](https://github.com/argoproj/argo-events)
+* [Couler](https://github.com/couler-proj/couler)
+* [Katib](https://github.com/kubeflow/katib)
+* [Kedro](https://kedro.readthedocs.io/en/stable/)
+* [Kubeflow Pipelines](https://github.com/kubeflow/pipelines)
+* [Netflix Metaflow](https://metaflow.org)
+* [Onepanel](https://www.onepanel.ai/)
+* [Ploomber](https://github.com/ploomber/ploomber)
+* [Seldon](https://github.com/SeldonIO/seldon-core)
+* [SQLFlow](https://github.com/sql-machine-learning/sqlflow)
+* [Orchest](https://github.com/orchest/orchest/)
## Client Libraries
@@ -74,11 +66,15 @@ Check out our [Java, Golang and Python clients](docs/client-libraries.md).
## Quickstart
+The following commands install Argo Workflows as well as some commonly used components:
+
```bash
-kubectl create namespace argo
-kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/install.yaml
+kubectl create ns argo
+kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start-postgres.yaml
```
+> **These manifests are intended to help you get started quickly. They contain hard-coded, publicly known passwords and are not suitable for use in production.**
+
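+As a quick check (illustrative; the commands and example file below are standard Argo tooling), you can submit one of the example workflows from this repository with the Argo CLI:
+
+```bash
+argo submit -n argo --watch https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-world.yaml
+argo list -n argo
+```
+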
## Who uses Argo Workflows?
[Official Argo Workflows user list](USERS.md)
@@ -92,7 +88,7 @@ kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo-workflo
## Features
* UI to visualize and manage Workflows
-* Artifact support (S3, Artifactory, Alibaba Cloud OSS, HTTP, Git, GCS, raw)
+* Artifact support (S3, Artifactory, Alibaba Cloud OSS, Azure Blob Storage, HTTP, Git, GCS, raw)
* Workflow templating to store commonly used Workflows in the cluster
* Archiving Workflows after executing for later access
* Scheduled workflows using cron
diff --git a/USERS.md b/USERS.md
index d15e43876f57..3a053067d915 100644
--- a/USERS.md
+++ b/USERS.md
@@ -1,6 +1,6 @@
## Argo Workflows User Community Surveys & Feedback
-Please find [here](https://github.com/argoproj/argoproj/blob/master/community/user_surveys/ArgoWorkflows2020SurveySummary.pdf) Argo Workflows user community 2020 survey results.
+Please find the Argo Workflows user community 2020 survey results [here](https://github.com/argoproj/argoproj/blob/master/community/user_surveys/ArgoWorkflows2020SurveySummary.pdf) and the 2021 results [here](https://blog.argoproj.io/argo-workflows-2021-survey-results-d6fa890030ee?gi=857daaa1faa9).
## Who uses Argo Workflows?
@@ -24,9 +24,12 @@ Currently, the following organizations are **officially** using Argo Workflows:
1. [Ant Group](https://www.antgroup.com/)
1. [AppDirect](https://www.appdirect.com/)
1. [Arabesque](https://www.arabesque.com/)
+1. [Argonaut](https://www.argonaut.dev/)
+1. [ArthurAI](https://arthur.ai/)
1. [Astraea](https://astraea.earth/)
1. [BasisAI](https://basis-ai.com/)
1. [BEI.RE](https://www.bei.re/)
+1. [bimspot](https://bimspot.io)
1. [BioBox Analytics](https://biobox.io)
1. [BlackRock](https://www.blackrock.com/)
1. [Bloomberg](https://www.bloomberg.com/)
@@ -43,13 +46,15 @@ Currently, the following organizations are **officially** using Argo Workflows:
1. [Cisco](https://www.cisco.com/)
1. [CloudSeeds](https://www.cloudseeds.de/)
1. [Codec](https://www.codec.ai/)
+1. [Codefresh](https://www.codefresh.io/)
1. [Commodus Tech](https://www.commodus.tech)
1. [Concierge Render](https://www.conciergerender.com)
1. [Cookpad](https://cookpad.com/)
1. [CoreFiling](https://www.corefiling.com/)
-1. [Cratejoy](https://www.cratejoy.com/)
1. [CoreWeave Cloud](https://www.coreweave.com)
+1. [Cratejoy](https://www.cratejoy.com/)
1. [Cruise](https://getcruise.com/)
+1. [CVision AI](https://www.cvisionai.com)
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
1. [Cyrus Biotechnology](https://cyrusbio.com/)
1. [Data4Risk](https://www.data4risk.com/)
@@ -57,12 +62,14 @@ Currently, the following organizations are **officially** using Argo Workflows:
1. [DataRobot](https://www.datarobot.com/)
1. [DataStax](https://www.datastax.com/)
1. [DDEV](https://www.ddev.com/)
-1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [DevSamurai](https://www.devsamurai.com/)
-1. [Dyno Therapeutics](https://dynotx.com)
+1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [DLR](https://www.dlr.de/eoc/)
+1. [Dyno Therapeutics](https://dynotx.com)
1. [EBSCO Information Services](https://www.ebsco.com/)
+1. [Enso Finance](https://enso.finance/)
1. [Equinor](https://www.equinor.com/)
+1. [Elastic](https://www.elastic.co/)
1. [Fairwinds](https://fairwinds.com/)
1. [FOLIO](http://corp.folio-sec.com/)
1. [FreeWheel](https://freewheel.com/)
@@ -77,8 +84,10 @@ Currently, the following organizations are **officially** using Argo Workflows:
1. [Google](https://www.google.com/intl/en/about/our-company/)
1. [Graviti](https://www.graviti.com)
1. [Greenhouse](https://greenhouse.io)
+1. [H2O.ai](https://h2o.ai/)
1. [Habx](https://www.habx.com/)
1. [Helio](https://helio.exchange)
+1. [Hemisphere Digital](https://hemisphere.digital)
1. [HOVER](https://hover.to)
1. [HSBC](https://hsbc.com)
1. [IBM](https://ibm.com)
@@ -115,19 +124,24 @@ Currently, the following organizations are **officially** using Argo Workflows:
1. [One Concern](https://oneconcern.com/)
1. [Onepanel](https://docs.onepanel.ai)
1. [Oracle](https://www.oracle.com/)
+1. [Orchest](https://www.orchest.io/)
1. [OVH](https://www.ovh.com/)
-1. [Peak AI](https://www.peak.ai/)
+1. [PathAI](https://www.pathai.com)
1. [PDOK](https://www.pdok.nl/)
+1. [Peak AI](https://www.peak.ai/)
1. [Pipekit](https://pipeit.io)
+1. [Pismo](https://pismo.io/)
1. [Polarpoint.io](https://polarpoint.io)
1. [Pollination](https://pollination.cloud)
1. [Preferred Networks](https://www.preferred-networks.jp/en/)
1. [Promaton](https://www.promaton.com/)
1. [Prudential](https://www.prudential.com.sg/)
1. [Quantibio](http://quantibio.com/us/en/)
+1. [QuantumBlack](https://quantumblack.com/)
1. [Raccoon Digital Marketing](https://raccoon.ag/)
1. [Ramboll Shair](https://ramboll-shair.com/)
1. [Ravelin](https://www.ravelin.com/)
+1. [Reco](https://reco.ai)
1. [Red Hat](https://www.redhat.com/en)
1. [Reserved AI](https://reserved.ai/)
1. [Riskified](https://www.riskified.com)
@@ -152,21 +166,21 @@ Currently, the following organizations are **officially** using Argo Workflows:
1. [Threekit](https://www.threekit.com/)
1. [Tiger Analytics](https://www.tigeranalytics.com/)
1. [Tradeshift](https://tradeshift.com/)
+1. [Trendyol](https://trendyol.com)
1. [Tulip](https://tulip.com/)
1. [Ubie](https://ubie.life/)
1. [UFirstGroup](https://www.ufirstgroup.com)
1. [Vispera](https://www.vispera.co)
1. [VMware](https://www.vmware.com/)
+1. [Voyager](https://investvoyager.com/)
1. [Wavefront](https://www.wavefront.com/)
1. [Wellcome Trust](https://wellcome.ac.uk/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [Workiva](https://www.workiva.com/)
-1. [Voyager](https://investvoyager.com/)
+1. [Xueqiu](https://www.xueqiu.com/)
1. [Yubo](https://www.yubo.live/)
1. [Zhihu](https://www.zhihu.com/)
-1. [Xueqiu](https://www.xueqiu.com/)
-1. [bimspot](https://bimspot.io)
### Projects Using Argo
@@ -175,5 +189,8 @@ In addition, the following projects are **officially** using Argo Workflows:
1. [Couler](https://github.com/couler-proj/couler)
1. [Hera Workflows](https://github.com/argoproj-labs/hera-workflows)
1. [Kubeflow](https://www.kubeflow.org/)
+1. [Metaflow](https://www.metaflow.org)
1. [Onepanel](https://github.com/onepanelio/onepanel)
1. [SQLFlow](https://github.com/sql-machine-learning/sqlflow)
+1. [BisQue](https://github.com/UCSB-VRL/bisqueUCSB)
+1. [Tator](https://www.tator.io)
diff --git a/api/jsonschema/schema.json b/api/jsonschema/schema.json
index 6b23d91bafd0..1c1955b433b3 100644
--- a/api/jsonschema/schema.json
+++ b/api/jsonschema/schema.json
@@ -70,1167 +70,6 @@
},
"type": "object"
},
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSCredentials": {
- "properties": {
- "accessKeyId": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "secretAccessKey": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "sessionToken": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSEndpoint": {
- "properties": {
- "url": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep": {
- "properties": {
- "resources": {
- "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements",
- "title": "+kubebuilder:default={limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractVolumeSource": {
- "properties": {
- "awsElasticBlockStore": {
- "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource",
- "title": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional"
- },
- "azureDisk": {
- "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource",
- "title": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.\n+optional"
- },
- "azureFile": {
- "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource",
- "title": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.\n+optional"
- },
- "cephfs": {
- "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource",
- "title": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime\n+optional"
- },
- "cinder": {
- "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource",
- "title": "Cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.io.k8s.mysql-cinder-pd/README.md\n+optional"
- },
- "configMap": {
- "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource",
- "title": "ConfigMap represents a configMap that should populate this volume\n+optional"
- },
- "csi": {
- "$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource",
- "title": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).\n+optional"
- },
- "downwardAPI": {
- "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeSource",
- "title": "DownwardAPI represents downward API about the pod that should populate this volume\n+optional"
- },
- "emptyDir": {
- "$ref": "#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource",
- "title": "EmptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional"
- },
- "ephemeral": {
- "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralVolumeSource",
- "description": "Ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time.\n\n+optional"
- },
- "fc": {
- "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource",
- "title": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\n+optional"
- },
- "flexVolume": {
- "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource",
- "title": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin.\n+optional"
- },
- "flocker": {
- "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource",
- "title": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running\n+optional"
- },
- "gcePersistentDisk": {
- "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource",
- "title": "GCEPersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional"
- },
- "gitRepo": {
- "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource",
- "title": "GitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.\n+optional"
- },
- "glusterfs": {
- "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource",
- "title": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.io.k8s.volumes/glusterfs/README.md\n+optional"
- },
- "hostPath": {
- "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource",
- "title": "HostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.\n+optional"
- },
- "iscsi": {
- "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource",
- "title": "ISCSI represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.io.k8s.volumes/iscsi/README.md\n+optional"
- },
- "nfs": {
- "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource",
- "title": "NFS represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional"
- },
- "persistentVolumeClaim": {
- "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource",
- "title": "PersistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional"
- },
- "photonPersistentDisk": {
- "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource",
- "title": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
- },
- "portworxVolume": {
- "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource",
- "title": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine\n+optional"
- },
- "projected": {
- "$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource",
- "title": "Items for all in one resources secrets, configmaps, and downward API"
- },
- "quobyte": {
- "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource",
- "title": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime\n+optional"
- },
- "rbd": {
- "$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource",
- "title": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.io.k8s.volumes/rbd/README.md\n+optional"
- },
- "scaleIO": {
- "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource",
- "title": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.\n+optional"
- },
- "secret": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource",
- "title": "Secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional"
- },
- "storageos": {
- "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource",
- "title": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.\n+optional"
- },
- "vsphereVolume": {
- "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource",
- "title": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine\n+optional"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Backoff": {
- "properties": {
- "FactorPercentage": {
- "title": "+kubebuilder:default=200",
- "type": "integer"
- },
- "cap": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"0ms\""
- },
- "duration": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"100ms\""
- },
- "jitterPercentage": {
- "title": "the amount of jitter per step, typically 10-20%, \u003e100% is valid, but strange\n+kubebuilder:default=10",
- "type": "integer"
- },
- "steps": {
- "format": "uint64",
- "title": "the number of backoff steps, zero means no retries\n+kubebuilder:default=20",
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cat": {
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Code": {
- "properties": {
- "image": {
- "description": "Image is used in preference to Runtime.",
- "type": "string"
- },
- "runtime": {
- "type": "string"
- },
- "source": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Container": {
- "properties": {
- "args": {
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "command": {
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "env": {
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar"
- },
- "type": "array"
- },
- "image": {
- "type": "string"
- },
- "in": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Interface"
- },
- "resources": {
- "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements"
- },
- "volumeMounts": {
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cron": {
- "properties": {
- "layout": {
- "title": "+kubebuilder:default=\"2006-01-02T15:04:05Z07:00\"",
- "type": "string"
- },
- "schedule": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSource": {
- "properties": {
- "value": {
- "type": "string"
- },
- "valueFrom": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSourceFrom"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSourceFrom": {
- "properties": {
- "secretKeyRef": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSink": {
- "properties": {
- "actions": {
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLAction"
- },
- "type": "array"
- },
- "database": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSource": {
- "properties": {
- "commitInterval": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"5s\""
- },
- "database": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database"
- },
- "initSchema": {
- "title": "+kubebuilder:default=true",
- "type": "boolean"
- },
- "offsetColumn": {
- "type": "string"
- },
- "pollInterval": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"1s\""
- },
- "query": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database": {
- "properties": {
- "dataSource": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSource"
- },
- "driver": {
- "title": "+kubebuilder:default=default",
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Dedupe": {
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- },
- "maxSize": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity",
- "title": "MaxSize is the maximum number of entries to keep in the in-memory database used to store recent UIDs.\nLarger number mean bigger windows of time for dedupe, but greater memory usage.\n+kubebuilder:default=\"1M\""
- },
- "uid": {
- "title": "+kubebuilder:default=\"sha1(msg)\"",
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Expand": {
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Filter": {
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- },
- "expression": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Flatten": {
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Git": {
- "properties": {
- "branch": {
- "title": "+kubebuilder:default=main",
- "type": "string"
- },
- "command": {
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "env": {
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar"
- },
- "type": "array"
- },
- "image": {
- "type": "string"
- },
- "insecureIgnoreHostKey": {
- "title": "InsecureIgnoreHostKey is the bool value for ignoring check for host key",
- "type": "boolean"
- },
- "passwordSecret": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
- "title": "PasswordSecret is the secret selector to the repository password"
- },
- "path": {
- "description": "+kubebuilder:default=.",
- "type": "string"
- },
- "sshPrivateKeySecret": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
- "title": "SSHPrivateKeySecret is the secret selector to the repository ssh private key"
- },
- "url": {
- "type": "string"
- },
- "usernameSecret": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
- "title": "UsernameSecret is the secret selector to the repository username"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Group": {
- "properties": {
- "endOfGroup": {
- "type": "string"
- },
- "format": {
- "type": "string"
- },
- "key": {
- "type": "string"
- },
- "storage": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Storage"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTP": {
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeader": {
- "properties": {
- "name": {
- "type": "string"
- },
- "value": {
- "type": "string"
- },
- "valueFrom": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeaderSource"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeaderSource": {
- "properties": {
- "secretKeyRef": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSink": {
- "properties": {
- "headers": {
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeader"
- },
- "type": "array"
- },
- "insecureSkipVerify": {
- "type": "boolean"
- },
- "url": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSource": {
- "properties": {
- "serviceName": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Interface": {
- "properties": {
- "fifo": {
- "type": "boolean"
- },
- "http": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTP"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStream": {
- "properties": {
- "auth": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.NATSAuth"
- },
- "name": {
- "title": "+kubebuilder:default=default",
- "type": "string"
- },
- "natsUrl": {
- "type": "string"
- },
- "subject": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStreamSink": {
- "properties": {
- "jetstream": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStream"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStreamSource": {
- "properties": {
- "jetstream": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStream"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka": {
- "properties": {
- "kafkaConfig": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaConfig"
- },
- "name": {
- "title": "+kubebuilder:default=default",
- "type": "string"
- },
- "topic": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaConfig": {
- "properties": {
- "brokers": {
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "maxMessageBytes": {
- "type": "integer"
- },
- "net": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaNET"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaNET": {
- "properties": {
- "sasl": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SASL"
- },
- "tls": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.TLS"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSink": {
- "properties": {
- "acks": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString",
- "title": "+kubebuilder:default=\"all\""
- },
- "async": {
- "type": "boolean"
- },
- "batchSize": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity",
- "title": "+kubebuilder:default=\"100Ki\""
- },
- "compressionType": {
- "title": "+kubebuilder:default=\"lz4\"",
- "type": "string"
- },
- "enableIdempotence": {
- "title": "+kubebuilder:default=true",
- "type": "boolean"
- },
- "kafka": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka"
- },
- "linger": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "maxInflight": {
- "title": "The maximum number of messages to be in-flight when async.\n+kubebuilder:default=20",
- "type": "integer"
- },
- "messageTimeout": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"30s\""
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSource": {
- "properties": {
- "fetchMin": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity",
- "title": "+kubebuilder:default=\"100Ki\""
- },
- "fetchWaitMax": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"500ms\""
- },
- "groupId": {
- "description": "GroupID is the consumer group ID. If not specified, a unique deterministic group ID is generated.",
- "type": "string"
- },
- "kafka": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka"
- },
- "startOffset": {
- "title": "+kubebuilder:default=Last",
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Log": {
- "properties": {
- "truncate": {
- "format": "uint64",
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Map": {
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- },
- "expression": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metadata": {
- "properties": {
- "annotations": {
- "additionalProperties": {
- "type": "string"
- },
- "type": "object"
- },
- "labels": {
- "additionalProperties": {
- "type": "string"
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.NATSAuth": {
- "properties": {
- "token": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline": {
- "properties": {
- "metadata": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
- },
- "spec": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineSpec"
- },
- "status": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineStatus"
- }
- },
- "title": "+kubebuilder:object:root=true\n+kubebuilder:resource:shortName=pl\n+kubebuilder:subresource:status\n+kubebuilder:printcolumn:name=\"Phase\",type=string,JSONPath=`.status.phase`\n+kubebuilder:printcolumn:name=\"Message\",type=string,JSONPath=`.status.message`",
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineList": {
- "properties": {
- "items": {
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline"
- },
- "type": "array"
- },
- "metadata": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineSpec": {
- "properties": {
- "deletionDelay": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"72h\""
- },
- "steps": {
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec"
- },
- "title": "+patchStrategy=merge\n+patchMergeKey=name",
- "type": "array"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineStatus": {
- "properties": {
- "conditions": {
- "items": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition"
- },
- "type": "array"
- },
- "lastUpdated": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
- },
- "message": {
- "type": "string"
- },
- "phase": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3": {
- "properties": {
- "bucket": {
- "type": "string"
- },
- "credentials": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSCredentials"
- },
- "endpoint": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSEndpoint"
- },
- "name": {
- "title": "+kubebuilder:default=default",
- "type": "string"
- },
- "region": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Sink": {
- "properties": {
- "s3": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Source": {
- "properties": {
- "concurrency": {
- "title": "+kubebuilder:default=1",
- "type": "integer"
- },
- "pollPeriod": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"1m\""
- },
- "s3": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SASL": {
- "properties": {
- "mechanism": {
- "title": "SASLMechanism is the name of the enabled SASL mechanism.\nPossible values: OAUTHBEARER, PLAIN (defaults to PLAIN).\n+optional",
- "type": "string"
- },
- "password": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
- "title": "Password for SASL/PLAIN authentication"
- },
- "user": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
- "title": "User is the authentication identity (authcid) to present for\nSASL/PLAIN or SASL/SCRAM authentication"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLAction": {
- "properties": {
- "onError": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement"
- },
- "onRecordNotFound": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement"
- },
- "statement": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement": {
- "properties": {
- "args": {
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "sql": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN": {
- "properties": {
- "auth": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.NATSAuth"
- },
- "clusterId": {
- "type": "string"
- },
- "maxInflight": {
- "title": "Max inflight messages when subscribing to the stan server, which means how many messages\nbetween commits, therefore potential duplicates during disruption\n+kubebuilder:default=20",
- "type": "integer"
- },
- "name": {
- "title": "+kubebuilder:default=default",
- "type": "string"
- },
- "natsMonitoringUrl": {
- "type": "string"
- },
- "natsUrl": {
- "type": "string"
- },
- "subject": {
- "type": "string"
- },
- "subjectPrefix": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Scale": {
- "properties": {
- "desiredReplicas": {
- "description": "An expression to determine the number of replicas. Must evaluation to an `int`.",
- "type": "string"
- },
- "peekDelay": {
- "title": "An expression to determine the delay for peeking. Maybe string or duration, e.g. `\"4m\"`\n+kubebuilder:default=\"defaultPeekDelay\"",
- "type": "string"
- },
- "scalingDelay": {
- "title": "An expression to determine the delay for scaling. Maybe string or duration, e.g. `\"1m\"`\n+kubebuilder:default=\"defaultScalingDelay\"",
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sidecar": {
- "properties": {
- "resources": {
- "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements",
- "title": "+kubebuilder:default={limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sink": {
- "properties": {
- "db": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSink"
- },
- "deadLetterQueue": {
- "type": "boolean"
- },
- "http": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSink"
- },
- "jetstream": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStreamSink"
- },
- "kafka": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSink"
- },
- "log": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Log"
- },
- "name": {
- "title": "+kubebuilder:default=default",
- "type": "string"
- },
- "s3": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Sink"
- },
- "stan": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN"
- },
- "volume": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.VolumeSink"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Source": {
- "properties": {
- "cron": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cron"
- },
- "db": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSource"
- },
- "http": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSource"
- },
- "jetstream": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStreamSource"
- },
- "kafka": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSource"
- },
- "name": {
- "title": "+kubebuilder:default=default",
- "type": "string"
- },
- "retry": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Backoff",
- "title": "+kubebuilder:default={duration: \"100ms\", steps: 20, factorPercentage: 200, jitterPercentage: 10}"
- },
- "s3": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Source"
- },
- "stan": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN"
- },
- "volume": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.VolumeSource"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step": {
- "properties": {
- "metadata": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
- },
- "spec": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec"
- },
- "status": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepStatus"
- }
- },
- "title": "+kubebuilder:object:root=true\n+kubebuilder:subresource:status\n+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector\n+kubebuilder:printcolumn:name=\"Phase\",type=string,JSONPath=`.status.phase`\n+kubebuilder:printcolumn:name=\"Reason\",type=string,JSONPath=`.status.reason`\n+kubebuilder:printcolumn:name=\"Message\",type=string,JSONPath=`.status.message`\n+kubebuilder:printcolumn:name=\"Desired\",type=string,JSONPath=`.spec.replicas`\n+kubebuilder:printcolumn:name=\"Current\",type=string,JSONPath=`.status.replicas`",
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec": {
- "properties": {
- "affinity": {
- "$ref": "#/definitions/io.k8s.api.core.v1.Affinity"
- },
- "cat": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cat"
- },
- "code": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Code"
- },
- "container": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Container"
- },
- "dedupe": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Dedupe"
- },
- "expand": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Expand"
- },
- "filter": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Filter"
- },
- "flatten": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Flatten"
- },
- "git": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Git"
- },
- "group": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Group"
- },
- "imagePullSecrets": {
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference"
- },
- "title": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images\nin pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets\ncan be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.\nMore info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\n+patchStrategy=merge\n+patchMergeKey=name",
- "type": "array"
- },
- "map": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Map"
- },
- "metadata": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metadata"
- },
- "name": {
- "title": "+kubebuilder:default=default",
- "type": "string"
- },
- "nodeSelector": {
- "additionalProperties": {
- "type": "string"
- },
- "type": "object"
- },
- "replicas": {
- "title": "+kubebuilder:default=1",
- "type": "integer"
- },
- "restartPolicy": {
- "title": "+kubebuilder:default=OnFailure",
- "type": "string"
- },
- "scale": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Scale",
- "title": "+kubebuilder:default={peekDelay: \"defaultPeekDelay\", scalingDelay: \"defaultScalingDelay\", desiredReplicas: \"\"}"
- },
- "serviceAccountName": {
- "title": "+kubebuilder:default=pipeline",
- "type": "string"
- },
- "sidecar": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sidecar",
- "title": "+kubebuilder:default={resources: {limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}}"
- },
- "sinks": {
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sink"
- },
- "title": "+patchStrategy=merge\n+patchMergeKey=name",
- "type": "array"
- },
- "sources": {
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Source"
- },
- "title": "+patchStrategy=merge\n+patchMergeKey=name",
- "type": "array"
- },
- "terminator": {
- "type": "boolean"
- },
- "tolerations": {
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.Toleration"
- },
- "type": "array"
- },
- "volumes": {
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.Volume"
- },
- "title": "+patchStrategy=merge\n+patchMergeKey=name",
- "type": "array"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepStatus": {
- "properties": {
- "lastScaledAt": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
- },
- "message": {
- "type": "string"
- },
- "phase": {
- "type": "string"
- },
- "reason": {
- "type": "string"
- },
- "replicas": {
- "type": "integer"
- },
- "selector": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Storage": {
- "properties": {
- "name": {
- "type": "string"
- },
- "subPath": {
- "title": "volume name",
- "type": "string"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.TLS": {
- "properties": {
- "caCertSecret": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
- "title": "CACertSecret refers to the secret that contains the CA cert"
- },
- "certSecret": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
- "title": "CertSecret refers to the secret that contains the cert"
- },
- "keySecret": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
- "title": "KeySecret refers to the secret that contains the key"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.VolumeSink": {
- "properties": {
- "abstractVolumeSource": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractVolumeSource"
- }
- },
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.VolumeSource": {
- "properties": {
- "abstractVolumeSource": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractVolumeSource"
- },
- "concurrency": {
- "title": "+kubebuilder:default=1",
- "type": "integer"
- },
- "pollPeriod": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration",
- "title": "+kubebuilder:default=\"1m\""
- },
- "readOnly": {
- "type": "boolean"
- }
- },
- "type": "object"
- },
"google.protobuf.Any": {
"properties": {
"type_url": {
@@ -4485,6 +3324,30 @@
},
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.ArtGCStatus": {
+ "description": "ArtGCStatus maintains state related to ArtifactGC",
+ "properties": {
+ "notSpecified": {
+ "description": "if this is true, we already checked to see if we need to do it and we don't",
+ "type": "boolean"
+ },
+ "podsRecouped": {
+ "additionalProperties": {
+ "type": "boolean"
+ },
+ "description": "have completed Pods been processed? (mapped by Pod name) used to prevent re-processing the Status of a Pod more than once",
+ "type": "object"
+ },
+ "strategiesProcessed": {
+ "additionalProperties": {
+ "type": "boolean"
+ },
+ "description": "have Pods been started to perform this strategy? (enables us not to re-process what we've already done)",
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.Artifact": {
"description": "Artifact indicates an artifact to place at a specified path",
"properties": {
@@ -4496,10 +3359,22 @@
"description": "ArchiveLogs indicates if the container logs should be archived",
"type": "boolean"
},
+ "artifactGC": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactGC",
+ "description": "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows"
+ },
"artifactory": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifact",
"description": "Artifactory contains artifactory artifact location details"
},
+ "azure": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.AzureArtifact",
+ "description": "Azure contains Azure Storage artifact location details"
+ },
+ "deleted": {
+ "description": "Has this been deleted?",
+ "type": "boolean"
+ },
"from": {
"description": "From allows an artifact to reference an artifact from a previous step",
"type": "string"
@@ -4570,6 +3445,50 @@
],
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.ArtifactGC": {
+ "description": "ArtifactGC describes how to delete artifacts from completed Workflows",
+ "properties": {
+ "podMetadata": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Metadata",
+ "description": "PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion"
+ },
+ "serviceAccountName": {
+ "description": "ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion",
+ "type": "string"
+ },
+ "strategy": {
+ "description": "Strategy is the strategy to use.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "io.argoproj.workflow.v1alpha1.ArtifactGCSpec": {
+ "description": "ArtifactGCSpec specifies the Artifacts that need to be deleted",
+ "properties": {
+ "artifactsByNode": {
+ "additionalProperties": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactNodeSpec"
+ },
+ "description": "ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node",
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
+ "io.argoproj.workflow.v1alpha1.ArtifactGCStatus": {
+ "description": "ArtifactGCStatus describes the result of the deletion",
+ "properties": {
+ "artifactResultsByNode": {
+ "additionalProperties": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactResultNodeStatus"
+ },
+ "description": "ArtifactResultsByNode maps Node name to result",
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.ArtifactLocation": {
"description": "ArtifactLocation describes a location for a single or multiple artifacts. It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). It is also used to describe the location of multiple artifacts such as the archive location of a single workflow step, which the executor will use as a default location to store its files.",
"properties": {
@@ -4581,6 +3500,10 @@
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifact",
"description": "Artifactory contains artifactory artifact location details"
},
+ "azure": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.AzureArtifact",
+ "description": "Azure contains Azure Storage artifact location details"
+ },
"gcs": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GCSArtifact",
"description": "GCS contains GCS artifact location details"
@@ -4612,6 +3535,23 @@
},
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.ArtifactNodeSpec": {
+ "description": "ArtifactNodeSpec specifies the Artifacts that need to be deleted for a given Node",
+ "properties": {
+ "archiveLocation": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactLocation",
+ "description": "ArchiveLocation is the template-level Artifact location specification"
+ },
+ "artifacts": {
+ "additionalProperties": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Artifact"
+ },
+ "description": "Artifacts maps artifact name to Artifact description",
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.ArtifactPaths": {
"description": "ArtifactPaths expands a step from a collection of artifacts",
"properties": {
@@ -4623,10 +3563,22 @@
"description": "ArchiveLogs indicates if the container logs should be archived",
"type": "boolean"
},
+ "artifactGC": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactGC",
+ "description": "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows"
+ },
"artifactory": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifact",
"description": "Artifactory contains artifactory artifact location details"
},
+ "azure": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.AzureArtifact",
+ "description": "Azure contains Azure Storage artifact location details"
+ },
+ "deleted": {
+ "description": "Has this been deleted?",
+ "type": "boolean"
+ },
"from": {
"description": "From allows an artifact to reference an artifact from a previous step",
"type": "string"
@@ -4708,6 +3660,10 @@
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifactRepository",
"description": "Artifactory stores artifacts to JFrog Artifactory"
},
+ "azure": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.AzureArtifactRepository",
+ "description": "Azure stores artifact in an Azure Storage account"
+ },
"gcs": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GCSArtifactRepository",
"description": "GCS stores artifact in a GCS object store"
@@ -4765,6 +3721,40 @@
},
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.ArtifactResult": {
+ "description": "ArtifactResult describes the result of attempting to delete a given Artifact",
+ "properties": {
+ "error": {
+ "description": "Error is an optional error message which should be set if Success==false",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name is the name of the Artifact",
+ "type": "string"
+ },
+ "success": {
+ "description": "Success describes whether the deletion succeeded",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "name"
+ ],
+ "type": "object"
+ },
+ "io.argoproj.workflow.v1alpha1.ArtifactResultNodeStatus": {
+ "description": "ArtifactResultNodeStatus describes the result of the deletion on a given node",
+ "properties": {
+ "artifactResults": {
+ "additionalProperties": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactResult"
+ },
+ "description": "ArtifactResults maps Artifact name to result of the deletion",
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.ArtifactoryArtifact": {
"description": "ArtifactoryArtifact is the location of an artifactory artifact",
"properties": {
@@ -4804,6 +3794,67 @@
},
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.AzureArtifact": {
+ "description": "AzureArtifact is the location of a an Azure Storage artifact",
+ "properties": {
+ "accountKeySecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
+ "description": "AccountKeySecret is the secret selector to the Azure Blob Storage account access key"
+ },
+ "blob": {
+ "description": "Blob is the blob name (i.e., path) in the container where the artifact resides",
+ "type": "string"
+ },
+ "container": {
+ "description": "Container is the container where resources will be stored",
+ "type": "string"
+ },
+ "endpoint": {
+ "description": "Endpoint is the service url associated with an account. It is most likely \"https://\u003cACCOUNT_NAME\u003e.blob.core.windows.net\"",
+ "type": "string"
+ },
+ "useSDKCreds": {
+ "description": "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "endpoint",
+ "container",
+ "blob"
+ ],
+ "type": "object"
+ },
+ "io.argoproj.workflow.v1alpha1.AzureArtifactRepository": {
+ "description": "AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository",
+ "properties": {
+ "accountKeySecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
+ "description": "AccountKeySecret is the secret selector to the Azure Blob Storage account access key"
+ },
+ "blobNameFormat": {
+ "description": "BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables",
+ "type": "string"
+ },
+ "container": {
+ "description": "Container is the container where resources will be stored",
+ "type": "string"
+ },
+ "endpoint": {
+ "description": "Endpoint is the service url associated with an account. It is most likely \"https://\u003cACCOUNT_NAME\u003e.blob.core.windows.net\"",
+ "type": "string"
+ },
+ "useSDKCreds": {
+ "description": "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "endpoint",
+ "container"
+ ],
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.Backoff": {
"description": "Backoff is a backoff strategy to use within retryStrategy",
"properties": {
@@ -4822,6 +3873,20 @@
},
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.BasicAuth": {
+ "description": "BasicAuth describes the secret selectors required for basic authentication",
+ "properties": {
+ "passwordSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
+ "description": "PasswordSecret is the secret selector to the repository password"
+ },
+ "usernameSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
+ "description": "UsernameSecret is the secret selector to the repository username"
+ }
+ },
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.Cache": {
"description": "Cache is the configuration for the type of cache to be used",
"properties": {
@@ -4835,6 +3900,18 @@
],
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.ClientCertAuth": {
+ "description": "ClientCertAuth holds necessary information for client authentication via certificates",
+ "properties": {
+ "clientCertSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "clientKeySecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ }
+ },
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate": {
"description": "ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope",
"properties": {
@@ -4932,6 +4009,17 @@
},
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.CollectEventRequest": {
+ "properties": {
+ "name": {
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "io.argoproj.workflow.v1alpha1.CollectEventResponse": {
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.Condition": {
"properties": {
"message": {
@@ -4952,14 +4040,14 @@
"io.argoproj.workflow.v1alpha1.ContainerNode": {
"properties": {
"args": {
- "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"items": {
"type": "string"
},
"type": "array"
},
"command": {
- "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"items": {
"type": "string"
},
@@ -4988,7 +4076,7 @@
"type": "array"
},
"image": {
- "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+ "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
"type": "string"
},
"imagePullPolicy": {
@@ -5570,6 +4658,9 @@
"serviceAccountName": {
"type": "string"
},
+ "serviceAccountNamespace": {
+ "type": "string"
+ },
"subject": {
"type": "string"
}
@@ -5579,6 +4670,10 @@
"io.argoproj.workflow.v1alpha1.GitArtifact": {
"description": "GitArtifact is the location of an git artifact",
"properties": {
+ "branch": {
+ "description": "Branch is the branch to fetch when `SingleBranch` is enabled",
+ "type": "string"
+ },
"depth": {
"description": "Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip",
"type": "integer"
@@ -5610,6 +4705,10 @@
"description": "Revision is the git commit, tag, branch to checkout",
"type": "string"
},
+ "singleBranch": {
+ "description": "SingleBranch enables single branch clone, using the `branch` parameter",
+ "type": "boolean"
+ },
"sshPrivateKeySecret": {
"$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector",
"description": "SSHPrivateKeySecret is the secret selector to the repository ssh private key"
@@ -5731,6 +4830,10 @@
"description": "Body is content of the HTTP Request",
"type": "string"
},
+ "bodyFrom": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.HTTPBodySource",
+ "description": "BodyFrom is content of the HTTP Request as Bytes"
+ },
"headers": {
"description": "Headers are an optional list of headers to send with HTTP requests",
"items": {
@@ -5739,7 +4842,7 @@
"type": "array"
},
"insecureSkipVerify": {
- "description": "insecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client",
+ "description": "InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client",
"type": "boolean"
},
"method": {
@@ -5765,8 +4868,12 @@
"type": "object"
},
"io.argoproj.workflow.v1alpha1.HTTPArtifact": {
- "description": "HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container",
+ "description": "HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container",
"properties": {
+ "auth": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.HTTPAuth",
+ "description": "Auth contains information for client authentication"
+ },
"headers": {
"description": "Headers are an optional list of headers to send with HTTP requests for artifacts",
"items": {
@@ -5784,6 +4891,30 @@
],
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.HTTPAuth": {
+ "properties": {
+ "basicAuth": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.BasicAuth"
+ },
+ "clientCert": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClientCertAuth"
+ },
+ "oauth2": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.OAuth2Auth"
+ }
+ },
+ "type": "object"
+ },
+ "io.argoproj.workflow.v1alpha1.HTTPBodySource": {
+ "description": "HTTPBodySource contains the source of the HTTP body.",
+ "properties": {
+ "bytes": {
+ "format": "byte",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.HTTPHeader": {
"properties": {
"name": {
@@ -5953,9 +5084,6 @@
"description": "TemplateRef is the reference to the template resource to execute by the hook"
}
},
- "required": [
- "template"
- ],
"type": "object"
},
"io.argoproj.workflow.v1alpha1.Link": {
@@ -6005,6 +5133,18 @@
},
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.ManifestFrom": {
+ "properties": {
+ "artifact": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Artifact",
+ "description": "Artifact contains the artifact to use"
+ }
+ },
+ "required": [
+ "artifact"
+ ],
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.MemoizationStatus": {
"description": "MemoizationStatus is the status of this memoized node",
"properties": {
@@ -6295,6 +5435,50 @@
"description": "NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately.",
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.OAuth2Auth": {
+ "description": "OAuth2Auth holds all information for client authentication via OAuth2 tokens",
+ "properties": {
+ "clientIDSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "clientSecretSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "endpointParams": {
+ "items": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.OAuth2EndpointParam"
+ },
+ "type": "array"
+ },
+ "scopes": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "tokenURLSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ }
+ },
+ "type": "object"
+ },
+ "io.argoproj.workflow.v1alpha1.OAuth2EndpointParam": {
+ "description": "EndpointParam is for requesting optional fields that should be sent in the oauth request",
+ "properties": {
+ "key": {
+ "description": "Name is the header name",
+ "type": "string"
+ },
+ "value": {
+ "description": "Value is the literal value to use for the header",
+ "type": "string"
+ }
+ },
+ "required": [
+ "key"
+ ],
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.OSSArtifact": {
"description": "OSSArtifact is the location of an Alibaba Cloud OSS artifact",
"properties": {
@@ -6560,6 +5744,10 @@
"description": "Manifest contains the kubernetes manifest",
"type": "string"
},
+ "manifestFrom": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ManifestFrom",
+ "description": "ManifestFrom is the source for a single kubernetes manifest"
+ },
"mergeStrategy": {
"description": "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\" Must be one of: strategic, merge, json",
"type": "string"
@@ -6589,6 +5777,12 @@
"namespace": {
"type": "string"
},
+ "parameters": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
"uid": {
"type": "string"
}
@@ -6615,6 +5809,12 @@
"nodeFieldSelector": {
"type": "string"
},
+ "parameters": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
"restartSuccessful": {
"type": "boolean"
},
@@ -6645,7 +5845,7 @@
},
"limit": {
"$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString",
- "description": "Limit is the maximum number of attempts when retrying a container"
+ "description": "Limit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be `limit + 1`."
},
"retryPolicy": {
"description": "RetryPolicy is a policy of NodePhase statuses that will be retried",
@@ -6782,14 +5982,14 @@
"description": "ScriptTemplate is a template subtype to enable scripting through code steps",
"properties": {
"args": {
- "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"items": {
"type": "string"
},
"type": "array"
},
"command": {
- "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"items": {
"type": "string"
},
@@ -6812,7 +6012,7 @@
"type": "array"
},
"image": {
- "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+ "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
"type": "string"
},
"imagePullPolicy": {
@@ -7371,14 +6571,14 @@
"description": "UserContainer is a container specified by a user.",
"properties": {
"args": {
- "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"items": {
"type": "string"
},
"type": "array"
},
"command": {
- "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"items": {
"type": "string"
},
@@ -7401,7 +6601,7 @@
"type": "array"
},
"image": {
- "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+ "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
"type": "string"
},
"imagePullPolicy": {
@@ -7800,6 +7000,12 @@
},
"namespace": {
"type": "string"
+ },
+ "parameters": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
}
},
"type": "object"
@@ -7829,6 +7035,12 @@
"nodeFieldSelector": {
"type": "string"
},
+ "parameters": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
"restartSuccessful": {
"type": "boolean"
}
@@ -7877,6 +7089,10 @@
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Arguments",
"description": "Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{io.argoproj.workflow.v1alpha1.parameters.myparam}}"
},
+ "artifactGC": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactGC",
+ "description": "ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)"
+ },
"artifactRepositoryRef": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactRepositoryRef",
"description": "ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config."
@@ -7954,14 +7170,14 @@
},
"podGC": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.PodGC",
- "description": "PodGC describes the strategy to use when to deleting completed pods"
+ "description": "PodGC describes the strategy to use when deleting completed pods"
},
"podMetadata": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Metadata",
"description": "PodMetadata defines additional metadata that should be applied to workflow pods"
},
"podPriority": {
- "description": "Priority to apply to workflow pods.",
+ "description": "Priority to apply to workflow pods. DEPRECATED: Use PodPriorityClassName instead.",
"type": "integer"
},
"podPriorityClassName": {
@@ -8032,7 +7248,7 @@
},
"volumeClaimGC": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.VolumeClaimGC",
- "description": "VolumeClaimGC describes the strategy to use when to deleting volumes from completed workflows"
+ "description": "VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows"
},
"volumeClaimTemplates": {
"description": "VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow",
@@ -8054,7 +7270,7 @@
},
"workflowMetadata": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowMetadata",
- "description": "WorkflowMetadata contains some metadata of the workflow to be refer"
+ "description": "WorkflowMetadata contains some metadata of the workflow to refer to"
},
"workflowTemplateRef": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateRef",
@@ -8066,6 +7282,10 @@
"io.argoproj.workflow.v1alpha1.WorkflowStatus": {
"description": "WorkflowStatus contains overall status information about a workflow",
"properties": {
+ "artifactGCStatus": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtGCStatus",
+ "description": "ArtifactGCStatus maintains the status of Artifact Garbage Collection"
+ },
"artifactRepositoryRef": {
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactRepositoryRefStatus",
"description": "ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile."
@@ -10997,43 +10217,6 @@
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"type": "string"
},
- "io.k8s.apimachinery.pkg.apis.meta.v1.Condition": {
- "description": "Condition contains details for one aspect of the current state of this API Resource.",
- "properties": {
- "lastTransitionTime": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time",
- "description": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable."
- },
- "message": {
- "description": "message is a human readable message indicating details about the transition. This may be an empty string.",
- "type": "string"
- },
- "observedGeneration": {
- "description": "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.",
- "type": "integer"
- },
- "reason": {
- "description": "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.",
- "type": "string"
- },
- "status": {
- "description": "status of the condition, one of True, False, Unknown.",
- "type": "string"
- },
- "type": {
- "description": "type of condition in CamelCase or in foo.example.com/CamelCase.",
- "type": "string"
- }
- },
- "required": [
- "type",
- "status",
- "lastTransitionTime",
- "reason",
- "message"
- ],
- "type": "object"
- },
"io.k8s.apimachinery.pkg.apis.meta.v1.CreateOptions": {
"description": "CreateOptions may be provided when creating an API object.",
"properties": {
@@ -11049,16 +10232,7 @@
"type": "string"
},
"fieldValidation": {
- "title": "fieldValidation determines how the server should respond to\nunknown/duplicate fields in the object in the request.\nIntroduced as alpha in 1.23, older servers or servers with the\n`ServerSideFieldValidation` feature disabled will discard valid values\nspecified in this param and not perform any server side field validation.\nValid values are:\n- Ignore: ignores unknown/duplicate fields.\n- Warn: responds with a warning for each\nunknown/duplicate field, but successfully serves the request.\n- Strict: fails the request on unknown/duplicate fields.\n+optional",
- "type": "string"
- }
- },
- "type": "object"
- },
- "io.k8s.apimachinery.pkg.apis.meta.v1.Duration": {
- "description": "Duration is a wrapper around time.Duration which supports correct\nmarshaling to YAML and JSON. In particular, it marshals into strings, which\ncan be used as map keys in json.",
- "properties": {
- "duration": {
+ "title": "fieldValidation instructs the server on how to handle\nobjects in the request (POST/PUT/PATCH) containing unknown\nor duplicate fields, provided that the `ServerSideFieldValidation`\nfeature gate is also enabled. Valid values are:\n- Ignore: This will ignore any unknown fields that are silently\ndropped from the object, and will ignore all but the last duplicate\nfield that the decoder encounters. This is the default behavior\nprior to v1.23 and is the default behavior when the\n`ServerSideFieldValidation` feature gate is disabled.\n- Warn: This will send a warning via the standard warning response\nheader for each unknown field that is dropped from the object, and\nfor each duplicate field that is encountered. The request will\nstill succeed if there are no other errors, and will only persist\nthe last of any duplicate fields. This is the default when the\n`ServerSideFieldValidation` feature gate is enabled.\n- Strict: This will fail the request with a BadRequest error if\nany unknown fields would be dropped from the object, or if any\nduplicate fields are present. The error returned from the server\nwill contain all unknown and duplicate fields encountered.\n+optional",
"type": "string"
}
},
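The `fieldValidation` title above documents three modes (Ignore, Warn, Strict). As a hedged sketch, a create request body could carry the mode via `createOptions`; the wrapping request shape (a sensor create request is the nearest definition in this file) and the namespace are assumptions:

```python
# Hedged sketch of CreateOptions carrying the fieldValidation mode described above.
# The wrapping request shape and the namespace are assumptions.
create_options = {
    "fieldValidation": "Strict",  # or "Ignore" / "Warn", per the title text above
}

create_request = {
    "namespace": "argo",              # hypothetical namespace
    "createOptions": create_options,
    # the object being created (e.g. a sensor manifest) would be attached here
}
print(create_request)
```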
@@ -11344,55 +10518,6 @@
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": {
"type": "string"
},
- "pipeline.DeletePipelineResponse": {
- "type": "object"
- },
- "pipeline.LogEntry": {
- "properties": {
- "msg": {
- "type": "string"
- },
- "namespace": {
- "type": "string"
- },
- "pipelineName": {
- "type": "string"
- },
- "stepName": {
- "type": "string"
- },
- "time": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
- }
- },
- "title": "structured log entry",
- "type": "object"
- },
- "pipeline.PipelineWatchEvent": {
- "properties": {
- "object": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline"
- },
- "type": {
- "type": "string"
- }
- },
- "type": "object"
- },
- "pipeline.RestartPipelineResponse": {
- "type": "object"
- },
- "pipeline.StepWatchEvent": {
- "properties": {
- "object": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step"
- },
- "type": {
- "type": "string"
- }
- },
- "type": "object"
- },
"sensor.CreateSensorRequest": {
"properties": {
"createOptions": {
diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json
index 497112b2980a..4c7c6cc229c9 100644
--- a/api/openapi-spec/swagger.json
+++ b/api/openapi-spec/swagger.json
@@ -1374,245 +1374,6 @@
}
}
},
- "/api/v1/pipelines/{namespace}": {
- "get": {
- "tags": [
- "PipelineService"
- ],
- "operationId": "PipelineService_ListPipelines",
- "parameters": [
- {
- "type": "string",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
- "name": "listOptions.labelSelector",
- "in": "query"
- },
- {
- "type": "string",
- "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.",
- "name": "listOptions.fieldSelector",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.",
- "name": "listOptions.watch",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\n+optional.",
- "name": "listOptions.allowWatchBookmarks",
- "in": "query"
- },
- {
- "type": "string",
- "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "listOptions.resourceVersion",
- "in": "query"
- },
- {
- "type": "string",
- "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "listOptions.resourceVersionMatch",
- "in": "query"
- },
- {
- "type": "string",
- "format": "int64",
- "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.",
- "name": "listOptions.timeoutSeconds",
- "in": "query"
- },
- {
- "type": "string",
- "format": "int64",
- "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.",
- "name": "listOptions.limit",
- "in": "query"
- },
- {
- "type": "string",
- "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.",
- "name": "listOptions.continue",
- "in": "query"
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineList"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/api/v1/pipelines/{namespace}/{name}": {
- "get": {
- "tags": [
- "PipelineService"
- ],
- "operationId": "PipelineService_GetPipeline",
- "parameters": [
- {
- "type": "string",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "name",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "getOptions.resourceVersion",
- "in": "query"
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- },
- "delete": {
- "tags": [
- "PipelineService"
- ],
- "operationId": "PipelineService_DeletePipeline",
- "parameters": [
- {
- "type": "string",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "name",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "format": "int64",
- "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.",
- "name": "deleteOptions.gracePeriodSeconds",
- "in": "query"
- },
- {
- "type": "string",
- "description": "Specifies the target UID.\n+optional.",
- "name": "deleteOptions.preconditions.uid",
- "in": "query"
- },
- {
- "type": "string",
- "description": "Specifies the target ResourceVersion\n+optional.",
- "name": "deleteOptions.preconditions.resourceVersion",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.",
- "name": "deleteOptions.orphanDependents",
- "in": "query"
- },
- {
- "type": "string",
- "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.",
- "name": "deleteOptions.propagationPolicy",
- "in": "query"
- },
- {
- "type": "array",
- "items": {
- "type": "string"
- },
- "collectionFormat": "multi",
- "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.",
- "name": "deleteOptions.dryRun",
- "in": "query"
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/pipeline.DeletePipelineResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/api/v1/pipelines/{namespace}/{name}/restart": {
- "post": {
- "tags": [
- "PipelineService"
- ],
- "operationId": "PipelineService_RestartPipeline",
- "parameters": [
- {
- "type": "string",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "name",
- "in": "path",
- "required": true
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/pipeline.RestartPipelineResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
"/api/v1/sensors/{namespace}": {
"get": {
"tags": [
@@ -2212,12 +1973,12 @@
}
}
},
- "/api/v1/stream/pipelines/{namespace}": {
+ "/api/v1/stream/sensors/{namespace}": {
"get": {
"tags": [
- "PipelineService"
+ "SensorService"
],
- "operationId": "PipelineService_WatchPipelines",
+ "operationId": "SensorService_WatchSensors",
"parameters": [
{
"type": "string",
@@ -2287,13 +2048,13 @@
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
- "title": "Stream result of pipeline.PipelineWatchEvent",
+ "title": "Stream result of sensor.SensorWatchEvent",
"properties": {
"error": {
"$ref": "#/definitions/grpc.gateway.runtime.StreamError"
},
"result": {
- "$ref": "#/definitions/pipeline.PipelineWatchEvent"
+ "$ref": "#/definitions/sensor.SensorWatchEvent"
}
}
}
@@ -2307,12 +2068,12 @@
}
}
},
- "/api/v1/stream/pipelines/{namespace}/logs": {
+ "/api/v1/stream/sensors/{namespace}/logs": {
"get": {
"tags": [
- "PipelineService"
+ "SensorService"
],
- "operationId": "PipelineService_PipelineLogs",
+ "operationId": "SensorService_SensorsLogs",
"parameters": [
{
"type": "string",
@@ -2322,19 +2083,19 @@
},
{
"type": "string",
- "description": "optional - only return entries for this pipeline.",
+ "description": "optional - only return entries for this sensor name.",
"name": "name",
"in": "query"
},
{
"type": "string",
- "description": "optional - only return entries for this step.",
- "name": "stepName",
+ "description": "optional - only return entries for this trigger.",
+ "name": "triggerName",
"in": "query"
},
{
"type": "string",
- "description": "optional - only return entries which match this expresssion.",
+                    "description": "optional - only return entries where `msg` matches this regular expression.",
"name": "grep",
"in": "query"
},
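With the `grep` parameter now filtering log entries whose `msg` matches a regular expression, a hedged sketch of calling this streaming endpoint could look as follows; the server address, token, namespace, and the use of `requests` as the HTTP client are all assumptions:

```python
import requests

BASE_URL = "https://localhost:2746"            # hypothetical Argo Server address
HEADERS = {"Authorization": "Bearer <token>"}  # hypothetical credentials

# Stream sensor log entries for one namespace, narrowed by sensor name,
# trigger name, and a regular expression matched against `msg`
# (the three optional query parameters described above).
params = {
    "name": "my-sensor",
    "triggerName": "my-trigger",
    "grep": "error|failed",
}
with requests.get(
    f"{BASE_URL}/api/v1/stream/sensors/argo/logs",
    headers=HEADERS, params=params, stream=True, verify=False,  # self-signed dev cert assumed
) as resp:
    for line in resp.iter_lines():
        if line:
            print(line.decode())  # each line wraps a sensor.LogEntry result
```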
@@ -2409,13 +2170,13 @@
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
- "title": "Stream result of pipeline.LogEntry",
+ "title": "Stream result of sensor.LogEntry",
"properties": {
"error": {
"$ref": "#/definitions/grpc.gateway.runtime.StreamError"
},
"result": {
- "$ref": "#/definitions/pipeline.LogEntry"
+ "$ref": "#/definitions/sensor.LogEntry"
}
}
}
@@ -2429,27 +2190,103 @@
}
}
},
- "/api/v1/stream/sensors/{namespace}": {
- "get": {
+ "/api/v1/tracking/event": {
+ "post": {
"tags": [
- "SensorService"
+ "InfoService"
],
- "operationId": "SensorService_WatchSensors",
+ "operationId": "InfoService_CollectEvent",
"parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.CollectEventRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A successful response.",
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.CollectEventResponse"
+ }
+ },
+ "default": {
+ "description": "An unexpected error response.",
+ "schema": {
+ "$ref": "#/definitions/grpc.gateway.runtime.Error"
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/userinfo": {
+ "get": {
+ "tags": [
+ "InfoService"
+ ],
+ "operationId": "InfoService_GetUserInfo",
+ "responses": {
+ "200": {
+ "description": "A successful response.",
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GetUserInfoResponse"
+ }
+ },
+ "default": {
+ "description": "An unexpected error response.",
+ "schema": {
+ "$ref": "#/definitions/grpc.gateway.runtime.Error"
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/version": {
+ "get": {
+ "tags": [
+ "InfoService"
+ ],
+ "operationId": "InfoService_GetVersion",
+ "responses": {
+ "200": {
+ "description": "A successful response.",
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Version"
+ }
+ },
+ "default": {
+ "description": "An unexpected error response.",
+ "schema": {
+ "$ref": "#/definitions/grpc.gateway.runtime.Error"
+ }
+ }
+ }
+ }
+ },
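The hunk above adds three InfoService routes: `POST /api/v1/tracking/event`, `GET /api/v1/userinfo`, and `GET /api/v1/version`. A hedged sketch of the two GET calls follows (the CollectEventRequest body for the tracking route is not expanded in this hunk, so it is omitted); the server address and token are assumptions:

```python
import requests

BASE_URL = "https://localhost:2746"            # hypothetical Argo Server address
HEADERS = {"Authorization": "Bearer <token>"}  # hypothetical credentials

# GET /api/v1/version -> io.argoproj.workflow.v1alpha1.Version
version = requests.get(f"{BASE_URL}/api/v1/version",
                       headers=HEADERS, verify=False).json()

# GET /api/v1/userinfo -> io.argoproj.workflow.v1alpha1.GetUserInfoResponse
userinfo = requests.get(f"{BASE_URL}/api/v1/userinfo",
                        headers=HEADERS, verify=False).json()

print(version)
print(userinfo)
```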
+ "/api/v1/workflow-event-bindings/{namespace}": {
+ "get": {
+ "tags": [
+ "EventService"
+ ],
+ "operationId": "EventService_ListWorkflowEventBindings",
+ "parameters": [
+ {
+ "type": "string",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
+ "name": "listOptions.labelSelector",
+ "in": "query"
+ },
{
"type": "string",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
- "name": "listOptions.labelSelector",
- "in": "query"
- },
- {
- "type": "string",
"description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.",
"name": "listOptions.fieldSelector",
"in": "query"
@@ -2501,18 +2338,9 @@
],
"responses": {
"200": {
- "description": "A successful response.(streaming responses)",
+ "description": "A successful response.",
"schema": {
- "type": "object",
- "title": "Stream result of sensor.SensorWatchEvent",
- "properties": {
- "error": {
- "$ref": "#/definitions/grpc.gateway.runtime.StreamError"
- },
- "result": {
- "$ref": "#/definitions/sensor.SensorWatchEvent"
- }
- }
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowEventBindingList"
}
},
"default": {
@@ -2524,12 +2352,12 @@
}
}
},
- "/api/v1/stream/sensors/{namespace}/logs": {
+ "/api/v1/workflow-events/{namespace}": {
"get": {
"tags": [
- "SensorService"
+ "WorkflowService"
],
- "operationId": "SensorService_SensorsLogs",
+ "operationId": "WorkflowService_WatchWorkflows",
"parameters": [
{
"type": "string",
@@ -2539,85 +2367,63 @@
},
{
"type": "string",
- "description": "optional - only return entries for this sensor name.",
- "name": "name",
- "in": "query"
- },
- {
- "type": "string",
- "description": "optional - only return entries for this trigger.",
- "name": "triggerName",
- "in": "query"
- },
- {
- "type": "string",
- "description": "option - only return entries where `msg` contains this regular expressions.",
- "name": "grep",
+ "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
+ "name": "listOptions.labelSelector",
"in": "query"
},
{
"type": "string",
- "description": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional.",
- "name": "podLogOptions.container",
+ "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.",
+ "name": "listOptions.fieldSelector",
"in": "query"
},
{
"type": "boolean",
- "description": "Follow the log stream of the pod. Defaults to false.\n+optional.",
- "name": "podLogOptions.follow",
+ "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.",
+ "name": "listOptions.watch",
"in": "query"
},
{
"type": "boolean",
- "description": "Return previous terminated container logs. Defaults to false.\n+optional.",
- "name": "podLogOptions.previous",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\n+optional.",
+ "name": "listOptions.allowWatchBookmarks",
"in": "query"
},
{
"type": "string",
- "format": "int64",
- "description": "A relative time in seconds before the current time from which to show logs. If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional.",
- "name": "podLogOptions.sinceSeconds",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
+ "name": "listOptions.resourceVersion",
"in": "query"
},
{
"type": "string",
- "format": "int64",
- "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.",
- "name": "podLogOptions.sinceTime.seconds",
- "in": "query"
- },
- {
- "type": "integer",
- "format": "int32",
- "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.",
- "name": "podLogOptions.sinceTime.nanos",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
+ "name": "listOptions.resourceVersionMatch",
"in": "query"
},
{
- "type": "boolean",
- "description": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line\nof log output. Defaults to false.\n+optional.",
- "name": "podLogOptions.timestamps",
+ "type": "string",
+ "format": "int64",
+ "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.",
+ "name": "listOptions.timeoutSeconds",
"in": "query"
},
{
"type": "string",
"format": "int64",
- "description": "If set, the number of lines from the end of the logs to show. If not specified,\nlogs are shown from the creation of the container or sinceSeconds or sinceTime\n+optional.",
- "name": "podLogOptions.tailLines",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.",
+ "name": "listOptions.limit",
"in": "query"
},
{
"type": "string",
- "format": "int64",
- "description": "If set, the number of bytes to read from the server before terminating the\nlog output. This may not display a complete final line of logging, and may return\nslightly more or slightly less than the specified limit.\n+optional.",
- "name": "podLogOptions.limitBytes",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.",
+ "name": "listOptions.continue",
"in": "query"
},
{
- "type": "boolean",
- "description": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the\nserving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver\nand the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real\nkubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the\nconnection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept\nthe actual log data coming from the real kubelet).\n+optional.",
- "name": "podLogOptions.insecureSkipTLSVerifyBackend",
+ "type": "string",
+ "name": "fields",
"in": "query"
}
],
@@ -2626,13 +2432,13 @@
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
- "title": "Stream result of sensor.LogEntry",
+ "title": "Stream result of io.argoproj.workflow.v1alpha1.WorkflowWatchEvent",
"properties": {
"error": {
"$ref": "#/definitions/grpc.gateway.runtime.StreamError"
},
"result": {
- "$ref": "#/definitions/sensor.LogEntry"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowWatchEvent"
}
}
}
@@ -2646,12 +2452,12 @@
}
}
},
- "/api/v1/stream/steps/{namespace}": {
+ "/api/v1/workflow-templates/{namespace}": {
"get": {
"tags": [
- "PipelineService"
+ "WorkflowTemplateService"
],
- "operationId": "PipelineService_WatchSteps",
+ "operationId": "WorkflowTemplateService_ListWorkflowTemplates",
"parameters": [
{
"type": "string",
@@ -2718,18 +2524,9 @@
],
"responses": {
"200": {
- "description": "A successful response.(streaming responses)",
+ "description": "A successful response.",
"schema": {
- "type": "object",
- "title": "Stream result of pipeline.StepWatchEvent",
- "properties": {
- "error": {
- "$ref": "#/definitions/grpc.gateway.runtime.StreamError"
- },
- "result": {
- "$ref": "#/definitions/pipeline.StepWatchEvent"
- }
- }
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateList"
}
},
"default": {
@@ -2739,19 +2536,33 @@
}
}
}
- }
- },
- "/api/v1/userinfo": {
- "get": {
+ },
+ "post": {
"tags": [
- "InfoService"
+ "WorkflowTemplateService"
+ ],
+ "operationId": "WorkflowTemplateService_CreateWorkflowTemplate",
+ "parameters": [
+ {
+ "type": "string",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateCreateRequest"
+ }
+ }
],
- "operationId": "InfoService_GetUserInfo",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GetUserInfoResponse"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate"
}
},
"default": {
@@ -2763,17 +2574,33 @@
}
}
},
- "/api/v1/version": {
- "get": {
+ "/api/v1/workflow-templates/{namespace}/lint": {
+ "post": {
"tags": [
- "InfoService"
+ "WorkflowTemplateService"
+ ],
+ "operationId": "WorkflowTemplateService_LintWorkflowTemplate",
+ "parameters": [
+ {
+ "type": "string",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateLintRequest"
+ }
+ }
],
- "operationId": "InfoService_GetVersion",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Version"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate"
}
},
"default": {
@@ -2785,12 +2612,12 @@
}
}
},
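The workflow-template routes above expose lint and create as POSTs to `/api/v1/workflow-templates/{namespace}/lint` and `/api/v1/workflow-templates/{namespace}`. A hedged sketch of calling them; the request wrappers (`WorkflowTemplateLintRequest` / `WorkflowTemplateCreateRequest`) are not expanded in this diff, so wrapping the manifest under a `template` key is an assumption, as are the server address, token, namespace, and manifest contents:

```python
import requests

BASE_URL = "https://localhost:2746"            # hypothetical Argo Server address
HEADERS = {"Authorization": "Bearer <token>"}  # hypothetical credentials
NAMESPACE = "argo"                             # hypothetical namespace

# Illustrative WorkflowTemplate manifest as a plain dict.
template = {
    "apiVersion": "argoproj.io/v1alpha1",
    "kind": "WorkflowTemplate",
    "metadata": {"name": "hello"},
    "spec": {
        "entrypoint": "main",
        "templates": [
            {"name": "main",
             "container": {"image": "busybox", "command": ["echo", "hello"]}},
        ],
    },
}

# Assumption: the lint/create request wrappers carry the manifest under "template".
body = {"namespace": NAMESPACE, "template": template}

lint = requests.post(f"{BASE_URL}/api/v1/workflow-templates/{NAMESPACE}/lint",
                     json=body, headers=HEADERS, verify=False)
create = requests.post(f"{BASE_URL}/api/v1/workflow-templates/{NAMESPACE}",
                       json=body, headers=HEADERS, verify=False)
print(lint.status_code, create.status_code)
```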
- "/api/v1/workflow-event-bindings/{namespace}": {
+ "/api/v1/workflow-templates/{namespace}/{name}": {
"get": {
"tags": [
- "EventService"
+ "WorkflowTemplateService"
],
- "operationId": "EventService_ListWorkflowEventBindings",
+ "operationId": "WorkflowTemplateService_GetWorkflowTemplate",
"parameters": [
{
"type": "string",
@@ -2800,66 +2627,65 @@
},
{
"type": "string",
- "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
- "name": "listOptions.labelSelector",
- "in": "query"
- },
- {
- "type": "string",
- "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.",
- "name": "listOptions.fieldSelector",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.",
- "name": "listOptions.watch",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\n+optional.",
- "name": "listOptions.allowWatchBookmarks",
- "in": "query"
+ "name": "name",
+ "in": "path",
+ "required": true
},
{
"type": "string",
"description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "listOptions.resourceVersion",
- "in": "query"
- },
- {
- "type": "string",
- "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "listOptions.resourceVersionMatch",
+ "name": "getOptions.resourceVersion",
"in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A successful response.",
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate"
+ }
},
+ "default": {
+ "description": "An unexpected error response.",
+ "schema": {
+ "$ref": "#/definitions/grpc.gateway.runtime.Error"
+ }
+ }
+ }
+ },
+ "put": {
+ "tags": [
+ "WorkflowTemplateService"
+ ],
+ "operationId": "WorkflowTemplateService_UpdateWorkflowTemplate",
+ "parameters": [
{
"type": "string",
- "format": "int64",
- "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.",
- "name": "listOptions.timeoutSeconds",
- "in": "query"
+ "name": "namespace",
+ "in": "path",
+ "required": true
},
{
"type": "string",
- "format": "int64",
- "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.",
- "name": "listOptions.limit",
- "in": "query"
+ "description": "DEPRECATED: This field is ignored.",
+ "name": "name",
+ "in": "path",
+ "required": true
},
{
- "type": "string",
- "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.",
- "name": "listOptions.continue",
- "in": "query"
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateUpdateRequest"
+ }
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowEventBindingList"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate"
}
},
"default": {
@@ -2869,14 +2695,12 @@
}
}
}
- }
- },
- "/api/v1/workflow-events/{namespace}": {
- "get": {
+ },
+ "delete": {
"tags": [
- "WorkflowService"
+ "WorkflowTemplateService"
],
- "operationId": "WorkflowService_WatchWorkflows",
+ "operationId": "WorkflowTemplateService_DeleteWorkflowTemplate",
"parameters": [
{
"type": "string",
@@ -2886,80 +2710,57 @@
},
{
"type": "string",
- "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
- "name": "listOptions.labelSelector",
- "in": "query"
- },
- {
- "type": "string",
- "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.",
- "name": "listOptions.fieldSelector",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.",
- "name": "listOptions.watch",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\n+optional.",
- "name": "listOptions.allowWatchBookmarks",
- "in": "query"
+ "name": "name",
+ "in": "path",
+ "required": true
},
{
"type": "string",
- "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "listOptions.resourceVersion",
+ "format": "int64",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.",
+ "name": "deleteOptions.gracePeriodSeconds",
"in": "query"
},
{
"type": "string",
- "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "listOptions.resourceVersionMatch",
+ "description": "Specifies the target UID.\n+optional.",
+ "name": "deleteOptions.preconditions.uid",
"in": "query"
},
{
"type": "string",
- "format": "int64",
- "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.",
- "name": "listOptions.timeoutSeconds",
+ "description": "Specifies the target ResourceVersion\n+optional.",
+ "name": "deleteOptions.preconditions.resourceVersion",
"in": "query"
},
{
- "type": "string",
- "format": "int64",
- "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.",
- "name": "listOptions.limit",
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.",
+ "name": "deleteOptions.orphanDependents",
"in": "query"
},
{
"type": "string",
- "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.",
- "name": "listOptions.continue",
+ "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.",
+ "name": "deleteOptions.propagationPolicy",
"in": "query"
},
{
- "type": "string",
- "name": "fields",
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi",
+ "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.",
+ "name": "deleteOptions.dryRun",
"in": "query"
}
],
"responses": {
"200": {
- "description": "A successful response.(streaming responses)",
+ "description": "A successful response.",
"schema": {
- "type": "object",
- "title": "Stream result of io.argoproj.workflow.v1alpha1.WorkflowWatchEvent",
- "properties": {
- "error": {
- "$ref": "#/definitions/grpc.gateway.runtime.StreamError"
- },
- "result": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowWatchEvent"
- }
- }
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateDeleteResponse"
}
},
"default": {
@@ -2971,12 +2772,12 @@
}
}
},
- "/api/v1/workflow-templates/{namespace}": {
+ "/api/v1/workflows/{namespace}": {
"get": {
"tags": [
- "WorkflowTemplateService"
+ "WorkflowService"
],
- "operationId": "WorkflowTemplateService_ListWorkflowTemplates",
+ "operationId": "WorkflowService_ListWorkflows",
"parameters": [
{
"type": "string",
@@ -3039,13 +2840,19 @@
"description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.",
"name": "listOptions.continue",
"in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\".",
+ "name": "fields",
+ "in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateList"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowList"
}
},
"default": {
@@ -3058,9 +2865,9 @@
},
"post": {
"tags": [
- "WorkflowTemplateService"
+ "WorkflowService"
],
- "operationId": "WorkflowTemplateService_CreateWorkflowTemplate",
+ "operationId": "WorkflowService_CreateWorkflow",
"parameters": [
{
"type": "string",
@@ -3073,7 +2880,7 @@
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateCreateRequest"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowCreateRequest"
}
}
],
@@ -3081,7 +2888,7 @@
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
}
},
"default": {
@@ -3093,12 +2900,12 @@
}
}
},
- "/api/v1/workflow-templates/{namespace}/lint": {
+ "/api/v1/workflows/{namespace}/lint": {
"post": {
"tags": [
- "WorkflowTemplateService"
+ "WorkflowService"
],
- "operationId": "WorkflowTemplateService_LintWorkflowTemplate",
+ "operationId": "WorkflowService_LintWorkflow",
"parameters": [
{
"type": "string",
@@ -3111,7 +2918,7 @@
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateLintRequest"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowLintRequest"
}
}
],
@@ -3119,7 +2926,7 @@
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
}
},
"default": {
@@ -3131,12 +2938,12 @@
}
}
},
- "/api/v1/workflow-templates/{namespace}/{name}": {
- "get": {
+ "/api/v1/workflows/{namespace}/submit": {
+ "post": {
"tags": [
- "WorkflowTemplateService"
+ "WorkflowService"
],
- "operationId": "WorkflowTemplateService_GetWorkflowTemplate",
+ "operationId": "WorkflowService_SubmitWorkflow",
"parameters": [
{
"type": "string",
@@ -3145,23 +2952,19 @@
"required": true
},
{
- "type": "string",
- "name": "name",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "getOptions.resourceVersion",
- "in": "query"
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSubmitRequest"
+ }
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
}
},
"default": {
@@ -3171,12 +2974,14 @@
}
}
}
- },
- "put": {
+ }
+ },
+ "/api/v1/workflows/{namespace}/{name}": {
+ "get": {
"tags": [
- "WorkflowTemplateService"
+ "WorkflowService"
],
- "operationId": "WorkflowTemplateService_UpdateWorkflowTemplate",
+ "operationId": "WorkflowService_GetWorkflow",
"parameters": [
{
"type": "string",
@@ -3186,25 +2991,28 @@
},
{
"type": "string",
- "description": "DEPRECATED: This field is ignored.",
"name": "name",
"in": "path",
"required": true
},
{
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateUpdateRequest"
- }
+ "type": "string",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
+ "name": "getOptions.resourceVersion",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Fields to be included or excluded in the response. e.g. \"spec,status.phase\", \"-status.nodes\".",
+ "name": "fields",
+ "in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplate"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
}
},
"default": {
@@ -3217,9 +3025,9 @@
},
"delete": {
"tags": [
- "WorkflowTemplateService"
+ "WorkflowService"
],
- "operationId": "WorkflowTemplateService_DeleteWorkflowTemplate",
+ "operationId": "WorkflowService_DeleteWorkflow",
"parameters": [
{
"type": "string",
@@ -3273,13 +3081,18 @@
"description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.",
"name": "deleteOptions.dryRun",
"in": "query"
+ },
+ {
+ "type": "boolean",
+ "name": "force",
+ "in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTemplateDeleteResponse"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowDeleteResponse"
}
},
"default": {
@@ -3291,12 +3104,12 @@
}
}
},
- "/api/v1/workflows/{namespace}": {
+ "/api/v1/workflows/{namespace}/{name}/log": {
"get": {
"tags": [
"WorkflowService"
],
- "operationId": "WorkflowService_ListWorkflows",
+ "operationId": "WorkflowService_WorkflowLogs",
"parameters": [
{
"type": "string",
@@ -3306,72 +3119,105 @@
},
{
"type": "string",
- "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
- "name": "listOptions.labelSelector",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "name": "podName",
"in": "query"
},
{
"type": "string",
- "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.",
- "name": "listOptions.fieldSelector",
+ "description": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional.",
+ "name": "logOptions.container",
"in": "query"
},
{
"type": "boolean",
- "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.",
- "name": "listOptions.watch",
+ "description": "Follow the log stream of the pod. Defaults to false.\n+optional.",
+ "name": "logOptions.follow",
"in": "query"
},
{
"type": "boolean",
- "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\n+optional.",
- "name": "listOptions.allowWatchBookmarks",
+ "description": "Return previous terminated container logs. Defaults to false.\n+optional.",
+ "name": "logOptions.previous",
"in": "query"
},
{
"type": "string",
- "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "listOptions.resourceVersion",
+ "format": "int64",
+ "description": "A relative time in seconds before the current time from which to show logs. If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional.",
+ "name": "logOptions.sinceSeconds",
"in": "query"
},
{
"type": "string",
- "description": "resourceVersionMatch determines how resourceVersion is applied to list calls.\nIt is highly recommended that resourceVersionMatch be set for list calls where\nresourceVersion is set\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "listOptions.resourceVersionMatch",
+ "format": "int64",
+ "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.",
+ "name": "logOptions.sinceTime.seconds",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "format": "int32",
+ "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.",
+ "name": "logOptions.sinceTime.nanos",
+ "in": "query"
+ },
+ {
+ "type": "boolean",
+ "description": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line\nof log output. Defaults to false.\n+optional.",
+ "name": "logOptions.timestamps",
"in": "query"
},
{
"type": "string",
"format": "int64",
- "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.",
- "name": "listOptions.timeoutSeconds",
+ "description": "If set, the number of lines from the end of the logs to show. If not specified,\nlogs are shown from the creation of the container or sinceSeconds or sinceTime\n+optional.",
+ "name": "logOptions.tailLines",
"in": "query"
},
{
"type": "string",
"format": "int64",
- "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.",
- "name": "listOptions.limit",
+ "description": "If set, the number of bytes to read from the server before terminating the\nlog output. This may not display a complete final line of logging, and may return\nslightly more or slightly less than the specified limit.\n+optional.",
+ "name": "logOptions.limitBytes",
+ "in": "query"
+ },
+ {
+ "type": "boolean",
+ "description": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the\nserving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver\nand the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real\nkubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the\nconnection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept\nthe actual log data coming from the real kubelet).\n+optional.",
+ "name": "logOptions.insecureSkipTLSVerifyBackend",
"in": "query"
},
{
"type": "string",
- "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.",
- "name": "listOptions.continue",
+ "name": "grep",
"in": "query"
},
{
"type": "string",
- "description": "Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\".",
- "name": "fields",
+ "name": "selector",
"in": "query"
}
],
"responses": {
"200": {
- "description": "A successful response.",
+ "description": "A successful response.(streaming responses)",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowList"
+ "type": "object",
+ "title": "Stream result of io.argoproj.workflow.v1alpha1.LogEntry",
+ "properties": {
+ "error": {
+ "$ref": "#/definitions/grpc.gateway.runtime.StreamError"
+ },
+ "result": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.LogEntry"
+ }
+ }
}
},
"default": {
@@ -3381,12 +3227,14 @@
}
}
}
- },
- "post": {
+ }
+ },
+ "/api/v1/workflows/{namespace}/{name}/resubmit": {
+ "put": {
"tags": [
"WorkflowService"
],
- "operationId": "WorkflowService_CreateWorkflow",
+ "operationId": "WorkflowService_ResubmitWorkflow",
"parameters": [
{
"type": "string",
@@ -3394,12 +3242,18 @@
"in": "path",
"required": true
},
+ {
+ "type": "string",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowCreateRequest"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowResubmitRequest"
}
}
],
@@ -3419,12 +3273,12 @@
}
}
},
- "/api/v1/workflows/{namespace}/lint": {
- "post": {
+ "/api/v1/workflows/{namespace}/{name}/resume": {
+ "put": {
"tags": [
"WorkflowService"
],
- "operationId": "WorkflowService_LintWorkflow",
+ "operationId": "WorkflowService_ResumeWorkflow",
"parameters": [
{
"type": "string",
@@ -3432,12 +3286,18 @@
"in": "path",
"required": true
},
+ {
+ "type": "string",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowLintRequest"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowResumeRequest"
}
}
],
@@ -3457,12 +3317,12 @@
}
}
},
- "/api/v1/workflows/{namespace}/submit": {
- "post": {
+ "/api/v1/workflows/{namespace}/{name}/retry": {
+ "put": {
"tags": [
"WorkflowService"
],
- "operationId": "WorkflowService_SubmitWorkflow",
+ "operationId": "WorkflowService_RetryWorkflow",
"parameters": [
{
"type": "string",
@@ -3470,12 +3330,18 @@
"in": "path",
"required": true
},
+ {
+ "type": "string",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
{
"name": "body",
"in": "body",
"required": true,
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSubmitRequest"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowRetryRequest"
}
}
],
@@ -3495,12 +3361,12 @@
}
}
},
- "/api/v1/workflows/{namespace}/{name}": {
- "get": {
+ "/api/v1/workflows/{namespace}/{name}/set": {
+ "put": {
"tags": [
"WorkflowService"
],
- "operationId": "WorkflowService_GetWorkflow",
+ "operationId": "WorkflowService_SetWorkflow",
"parameters": [
{
"type": "string",
@@ -3515,16 +3381,12 @@
"required": true
},
{
- "type": "string",
- "description": "resourceVersion sets a constraint on what resource versions a request may be served from.\nSee https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for\ndetails.\n\nDefaults to unset\n+optional",
- "name": "getOptions.resourceVersion",
- "in": "query"
- },
- {
- "type": "string",
- "description": "Fields to be included or excluded in the response. e.g. \"spec,status.phase\", \"-status.nodes\".",
- "name": "fields",
- "in": "query"
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSetRequest"
+ }
}
],
"responses": {
@@ -3541,12 +3403,14 @@
}
}
}
- },
- "delete": {
+ }
+ },
+ "/api/v1/workflows/{namespace}/{name}/stop": {
+ "put": {
"tags": [
"WorkflowService"
],
- "operationId": "WorkflowService_DeleteWorkflow",
+ "operationId": "WorkflowService_StopWorkflow",
"parameters": [
{
"type": "string",
@@ -3561,52 +3425,63 @@
"required": true
},
{
- "type": "string",
- "format": "int64",
- "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.",
- "name": "deleteOptions.gracePeriodSeconds",
- "in": "query"
- },
- {
- "type": "string",
- "description": "Specifies the target UID.\n+optional.",
- "name": "deleteOptions.preconditions.uid",
- "in": "query"
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStopRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A successful response.",
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
+ }
},
+ "default": {
+ "description": "An unexpected error response.",
+ "schema": {
+ "$ref": "#/definitions/grpc.gateway.runtime.Error"
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/workflows/{namespace}/{name}/suspend": {
+ "put": {
+ "tags": [
+ "WorkflowService"
+ ],
+ "operationId": "WorkflowService_SuspendWorkflow",
+ "parameters": [
{
"type": "string",
- "description": "Specifies the target ResourceVersion\n+optional.",
- "name": "deleteOptions.preconditions.resourceVersion",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.",
- "name": "deleteOptions.orphanDependents",
- "in": "query"
+ "name": "namespace",
+ "in": "path",
+ "required": true
},
{
"type": "string",
- "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.",
- "name": "deleteOptions.propagationPolicy",
- "in": "query"
+ "name": "name",
+ "in": "path",
+ "required": true
},
{
- "type": "array",
- "items": {
- "type": "string"
- },
- "collectionFormat": "multi",
- "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.",
- "name": "deleteOptions.dryRun",
- "in": "query"
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSuspendRequest"
+ }
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowDeleteResponse"
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
}
},
"default": {
@@ -3618,12 +3493,12 @@
}
}
},
- "/api/v1/workflows/{namespace}/{name}/log": {
- "get": {
+ "/api/v1/workflows/{namespace}/{name}/terminate": {
+ "put": {
"tags": [
"WorkflowService"
],
- "operationId": "WorkflowService_WorkflowLogs",
+ "operationId": "WorkflowService_TerminateWorkflow",
"parameters": [
{
"type": "string",
@@ -3638,17 +3513,63 @@
"required": true
},
{
- "type": "string",
- "name": "podName",
- "in": "query"
- },
- {
- "type": "string",
- "description": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional.",
- "name": "logOptions.container",
- "in": "query"
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTerminateRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A successful response.",
+ "schema": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
+ }
},
- {
+ "default": {
+ "description": "An unexpected error response.",
+ "schema": {
+ "$ref": "#/definitions/grpc.gateway.runtime.Error"
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/workflows/{namespace}/{name}/{podName}/log": {
+ "get": {
+ "tags": [
+ "WorkflowService"
+ ],
+ "summary": "DEPRECATED: Cannot work via HTTP if podName is an empty string. Use WorkflowLogs.",
+ "operationId": "WorkflowService_PodLogs",
+ "parameters": [
+ {
+ "type": "string",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "name": "podName",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional.",
+ "name": "logOptions.container",
+ "in": "query"
+ },
+ {
"type": "boolean",
"description": "Follow the log stream of the pod. Defaults to false.\n+optional.",
"name": "logOptions.follow",
@@ -3743,12 +3664,13 @@
}
}
},
- "/api/v1/workflows/{namespace}/{name}/resubmit": {
- "put": {
+ "/artifact-files/{namespace}/{idDiscriminator}/{id}/{nodeId}/{artifactDiscriminator}/{artifactName}": {
+ "get": {
"tags": [
- "WorkflowService"
+ "ArtifactService"
],
- "operationId": "WorkflowService_ResubmitWorkflow",
+ "summary": "Get an artifact.",
+ "operationId": "ArtifactService_GetArtifactFile",
"parameters": [
{
"type": "string",
@@ -3757,113 +3679,49 @@
"required": true
},
{
+ "enum": [
+ "workflow",
+                        "archived-workflows"
+ ],
"type": "string",
- "name": "name",
+ "name": "idDiscriminator",
"in": "path",
"required": true
},
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowResubmitRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/api/v1/workflows/{namespace}/{name}/resume": {
- "put": {
- "tags": [
- "WorkflowService"
- ],
- "operationId": "WorkflowService_ResumeWorkflow",
- "parameters": [
{
"type": "string",
- "name": "namespace",
+ "name": "id",
"in": "path",
"required": true
},
{
"type": "string",
- "name": "name",
+ "name": "nodeId",
"in": "path",
"required": true
},
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowResumeRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/api/v1/workflows/{namespace}/{name}/retry": {
- "put": {
- "tags": [
- "WorkflowService"
- ],
- "operationId": "WorkflowService_RetryWorkflow",
- "parameters": [
{
"type": "string",
- "name": "namespace",
+ "name": "artifactName",
"in": "path",
"required": true
},
{
+ "enum": [
+ "outputs"
+ ],
"type": "string",
- "name": "name",
+ "name": "artifactDiscriminator",
"in": "path",
"required": true
- },
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowRetryRequest"
- }
}
],
"responses": {
"200": {
- "description": "A successful response.",
+ "description": "An artifact file.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
+ "type": "string",
+ "format": "binary"
}
},
"default": {
@@ -3875,39 +3733,39 @@
}
}
},
- "/api/v1/workflows/{namespace}/{name}/set": {
- "put": {
+ "/artifacts-by-uid/{uid}/{nodeId}/{artifactName}": {
+ "get": {
"tags": [
- "WorkflowService"
+ "ArtifactService"
],
- "operationId": "WorkflowService_SetWorkflow",
+ "summary": "Get an output artifact by UID.",
+ "operationId": "ArtifactService_GetOutputArtifactByUID",
"parameters": [
{
"type": "string",
- "name": "namespace",
+ "name": "uid",
"in": "path",
"required": true
},
{
"type": "string",
- "name": "name",
+ "name": "nodeId",
"in": "path",
"required": true
},
{
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSetRequest"
- }
+ "type": "string",
+ "name": "artifactName",
+ "in": "path",
+ "required": true
}
],
"responses": {
"200": {
- "description": "A successful response.",
+ "description": "An artifact file.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
+ "type": "string",
+ "format": "binary"
}
},
"default": {
@@ -3919,12 +3777,13 @@
}
}
},
- "/api/v1/workflows/{namespace}/{name}/stop": {
- "put": {
+ "/artifacts/{namespace}/{name}/{nodeId}/{artifactName}": {
+ "get": {
"tags": [
- "WorkflowService"
+ "ArtifactService"
],
- "operationId": "WorkflowService_StopWorkflow",
+ "summary": "Get an output artifact.",
+ "operationId": "ArtifactService_GetOutputArtifact",
"parameters": [
{
"type": "string",
@@ -3938,64 +3797,25 @@
"in": "path",
"required": true
},
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStopRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/api/v1/workflows/{namespace}/{name}/suspend": {
- "put": {
- "tags": [
- "WorkflowService"
- ],
- "operationId": "WorkflowService_SuspendWorkflow",
- "parameters": [
{
"type": "string",
- "name": "namespace",
+ "name": "nodeId",
"in": "path",
"required": true
},
{
"type": "string",
- "name": "name",
+ "name": "artifactName",
"in": "path",
"required": true
- },
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowSuspendRequest"
- }
}
],
"responses": {
"200": {
- "description": "A successful response.",
+ "description": "An artifact file.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
+ "type": "string",
+ "format": "binary"
}
},
"default": {
@@ -4007,39 +3827,39 @@
}
}
},
- "/api/v1/workflows/{namespace}/{name}/terminate": {
- "put": {
+ "/input-artifacts-by-uid/{uid}/{nodeId}/{artifactName}": {
+ "get": {
"tags": [
- "WorkflowService"
+ "ArtifactService"
],
- "operationId": "WorkflowService_TerminateWorkflow",
+ "summary": "Get an input artifact by UID.",
+ "operationId": "ArtifactService_GetInputArtifactByUID",
"parameters": [
{
"type": "string",
- "name": "namespace",
+ "name": "uid",
"in": "path",
"required": true
},
{
"type": "string",
- "name": "name",
+ "name": "nodeId",
"in": "path",
"required": true
},
{
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowTerminateRequest"
- }
+ "type": "string",
+ "name": "artifactName",
+ "in": "path",
+ "required": true
}
],
"responses": {
"200": {
- "description": "A successful response.",
+ "description": "An artifact file.",
"schema": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Workflow"
+ "type": "string",
+ "format": "binary"
}
},
"default": {
@@ -4051,13 +3871,13 @@
}
}
},
- "/api/v1/workflows/{namespace}/{name}/{podName}/log": {
+ "/input-artifacts/{namespace}/{name}/{nodeId}/{artifactName}": {
"get": {
"tags": [
- "WorkflowService"
+ "ArtifactService"
],
- "summary": "DEPRECATED: Cannot work via HTTP if podName is an empty string. Use WorkflowLogs.",
- "operationId": "WorkflowService_PodLogs",
+ "summary": "Get an input artifact.",
+ "operationId": "ArtifactService_GetInputArtifact",
"parameters": [
{
"type": "string",
@@ -4073,1517 +3893,101 @@
},
{
"type": "string",
- "name": "podName",
+ "name": "nodeId",
"in": "path",
"required": true
},
{
"type": "string",
- "description": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional.",
- "name": "logOptions.container",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "Follow the log stream of the pod. Defaults to false.\n+optional.",
- "name": "logOptions.follow",
- "in": "query"
+ "name": "artifactName",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An artifact file.",
+ "schema": {
+ "type": "string",
+ "format": "binary"
+ }
},
- {
- "type": "boolean",
- "description": "Return previous terminated container logs. Defaults to false.\n+optional.",
- "name": "logOptions.previous",
- "in": "query"
- },
- {
- "type": "string",
- "format": "int64",
- "description": "A relative time in seconds before the current time from which to show logs. If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional.",
- "name": "logOptions.sinceSeconds",
- "in": "query"
- },
- {
- "type": "string",
- "format": "int64",
- "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.",
- "name": "logOptions.sinceTime.seconds",
- "in": "query"
- },
- {
- "type": "integer",
- "format": "int32",
- "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.",
- "name": "logOptions.sinceTime.nanos",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line\nof log output. Defaults to false.\n+optional.",
- "name": "logOptions.timestamps",
- "in": "query"
- },
- {
- "type": "string",
- "format": "int64",
- "description": "If set, the number of lines from the end of the logs to show. If not specified,\nlogs are shown from the creation of the container or sinceSeconds or sinceTime\n+optional.",
- "name": "logOptions.tailLines",
- "in": "query"
- },
- {
- "type": "string",
- "format": "int64",
- "description": "If set, the number of bytes to read from the server before terminating the\nlog output. This may not display a complete final line of logging, and may return\nslightly more or slightly less than the specified limit.\n+optional.",
- "name": "logOptions.limitBytes",
- "in": "query"
- },
- {
- "type": "boolean",
- "description": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the\nserving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver\nand the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real\nkubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the\nconnection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept\nthe actual log data coming from the real kubelet).\n+optional.",
- "name": "logOptions.insecureSkipTLSVerifyBackend",
- "in": "query"
- },
- {
- "type": "string",
- "name": "grep",
- "in": "query"
- },
- {
- "type": "string",
- "name": "selector",
- "in": "query"
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.(streaming responses)",
- "schema": {
- "type": "object",
- "title": "Stream result of io.argoproj.workflow.v1alpha1.LogEntry",
- "properties": {
- "error": {
- "$ref": "#/definitions/grpc.gateway.runtime.StreamError"
- },
- "result": {
- "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.LogEntry"
- }
- }
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/artifacts-by-uid/{uid}/{podName}/{artifactName}": {
- "get": {
- "tags": [
- "ArtifactService"
- ],
- "summary": "Get an output artifact by UID.",
- "operationId": "ArtifactService_GetOutputArtifactByUID",
- "parameters": [
- {
- "type": "string",
- "name": "uid",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "podName",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "artifactName",
- "in": "path",
- "required": true
- }
- ],
- "responses": {
- "200": {
- "description": "An artifact file."
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/artifacts/{namespace}/{name}/{podName}/{artifactName}": {
- "get": {
- "tags": [
- "ArtifactService"
- ],
- "summary": "Get an output artifact.",
- "operationId": "ArtifactService_GetOutputArtifact",
- "parameters": [
- {
- "type": "string",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "name",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "podName",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "artifactName",
- "in": "path",
- "required": true
- }
- ],
- "responses": {
- "200": {
- "description": "An artifact file."
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/input-artifacts-by-uid/{uid}/{podName}/{artifactName}": {
- "get": {
- "tags": [
- "ArtifactService"
- ],
- "summary": "Get an input artifact by UID.",
- "operationId": "ArtifactService_GetInputArtifactByUID",
- "parameters": [
- {
- "type": "string",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "uid",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "podName",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "artifactName",
- "in": "path",
- "required": true
- }
- ],
- "responses": {
- "200": {
- "description": "An artifact file."
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- },
- "/input-artifacts/{namespace}/{name}/{podName}/{artifactName}": {
- "get": {
- "tags": [
- "ArtifactService"
- ],
- "summary": "Get an input artifact.",
- "operationId": "ArtifactService_GetInputArtifact",
- "parameters": [
- {
- "type": "string",
- "name": "namespace",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "name",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "podName",
- "in": "path",
- "required": true
- },
- {
- "type": "string",
- "name": "artifactName",
- "in": "path",
- "required": true
- }
- ],
- "responses": {
- "200": {
- "description": "An artifact file."
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/grpc.gateway.runtime.Error"
- }
- }
- }
- }
- }
- },
- "definitions": {
- "eventsource.CreateEventSourceRequest": {
- "type": "object",
- "properties": {
- "eventSource": {
- "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource"
- },
- "namespace": {
- "type": "string"
- }
- }
- },
- "eventsource.EventSourceDeletedResponse": {
- "type": "object"
- },
- "eventsource.EventSourceWatchEvent": {
- "type": "object",
- "properties": {
- "object": {
- "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource"
- },
- "type": {
- "type": "string"
- }
- }
- },
- "eventsource.LogEntry": {
- "type": "object",
- "title": "structured log entry",
- "properties": {
- "eventName": {
- "type": "string",
- "title": "optional - the event name (e.g. `example`)"
- },
- "eventSourceName": {
- "type": "string"
- },
- "eventSourceType": {
- "type": "string",
- "title": "optional - the event source type (e.g. `webhook`)"
- },
- "level": {
- "type": "string"
- },
- "msg": {
- "type": "string"
- },
- "namespace": {
- "type": "string"
- },
- "time": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
- }
- }
- },
- "eventsource.UpdateEventSourceRequest": {
- "type": "object",
- "properties": {
- "eventSource": {
- "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource"
- },
- "name": {
- "type": "string"
- },
- "namespace": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSCredentials": {
- "type": "object",
- "properties": {
- "accessKeyId": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "secretAccessKey": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "sessionToken": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSEndpoint": {
- "type": "object",
- "properties": {
- "url": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep": {
- "type": "object",
- "properties": {
- "resources": {
- "title": "+kubebuilder:default={limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}",
- "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractVolumeSource": {
- "type": "object",
- "properties": {
- "awsElasticBlockStore": {
- "title": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource"
- },
- "azureDisk": {
- "title": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource"
- },
- "azureFile": {
- "title": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource"
- },
- "cephfs": {
- "title": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource"
- },
- "cinder": {
- "title": "Cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.io.k8s.mysql-cinder-pd/README.md\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource"
- },
- "configMap": {
- "title": "ConfigMap represents a configMap that should populate this volume\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource"
- },
- "csi": {
- "title": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource"
- },
- "downwardAPI": {
- "title": "DownwardAPI represents downward API about the pod that should populate this volume\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeSource"
- },
- "emptyDir": {
- "title": "EmptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource"
- },
- "ephemeral": {
- "description": "Ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time.\n\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralVolumeSource"
- },
- "fc": {
- "title": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource"
- },
- "flexVolume": {
- "title": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin.\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource"
- },
- "flocker": {
- "title": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource"
- },
- "gcePersistentDisk": {
- "title": "GCEPersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource"
- },
- "gitRepo": {
- "title": "GitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource"
- },
- "glusterfs": {
- "title": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.io.k8s.volumes/glusterfs/README.md\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource"
- },
- "hostPath": {
- "title": "HostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource"
- },
- "iscsi": {
- "title": "ISCSI represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.io.k8s.volumes/iscsi/README.md\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource"
- },
- "nfs": {
- "title": "NFS represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource"
- },
- "persistentVolumeClaim": {
- "title": "PersistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource"
- },
- "photonPersistentDisk": {
- "title": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
- "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource"
- },
- "portworxVolume": {
- "title": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource"
- },
- "projected": {
- "title": "Items for all in one resources secrets, configmaps, and downward API",
- "$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource"
- },
- "quobyte": {
- "title": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource"
- },
- "rbd": {
- "title": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.io.k8s.volumes/rbd/README.md\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource"
- },
- "scaleIO": {
- "title": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource"
- },
- "secret": {
- "title": "Secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource"
- },
- "storageos": {
- "title": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource"
- },
- "vsphereVolume": {
- "title": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine\n+optional",
- "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Backoff": {
- "type": "object",
- "properties": {
- "FactorPercentage": {
- "type": "integer",
- "title": "+kubebuilder:default=200"
- },
- "cap": {
- "title": "+kubebuilder:default=\"0ms\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "duration": {
- "title": "+kubebuilder:default=\"100ms\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "jitterPercentage": {
- "type": "integer",
- "title": "the amount of jitter per step, typically 10-20%, \u003e100% is valid, but strange\n+kubebuilder:default=10"
- },
- "steps": {
- "type": "string",
- "format": "uint64",
- "title": "the number of backoff steps, zero means no retries\n+kubebuilder:default=20"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cat": {
- "type": "object",
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Code": {
- "type": "object",
- "properties": {
- "image": {
- "description": "Image is used in preference to Runtime.",
- "type": "string"
- },
- "runtime": {
- "type": "string"
- },
- "source": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Container": {
- "type": "object",
- "properties": {
- "args": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "command": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "env": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar"
- }
- },
- "image": {
- "type": "string"
- },
- "in": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Interface"
- },
- "resources": {
- "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements"
- },
- "volumeMounts": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount"
- }
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cron": {
- "type": "object",
- "properties": {
- "layout": {
- "type": "string",
- "title": "+kubebuilder:default=\"2006-01-02T15:04:05Z07:00\""
- },
- "schedule": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSource": {
- "type": "object",
- "properties": {
- "value": {
- "type": "string"
- },
- "valueFrom": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSourceFrom"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSourceFrom": {
- "type": "object",
- "properties": {
- "secretKeyRef": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSink": {
- "type": "object",
- "properties": {
- "actions": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLAction"
- }
- },
- "database": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSource": {
- "type": "object",
- "properties": {
- "commitInterval": {
- "title": "+kubebuilder:default=\"5s\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "database": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database"
- },
- "initSchema": {
- "type": "boolean",
- "title": "+kubebuilder:default=true"
- },
- "offsetColumn": {
- "type": "string"
- },
- "pollInterval": {
- "title": "+kubebuilder:default=\"1s\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "query": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Database": {
- "type": "object",
- "properties": {
- "dataSource": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBDataSource"
- },
- "driver": {
- "type": "string",
- "title": "+kubebuilder:default=default"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Dedupe": {
- "type": "object",
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- },
- "maxSize": {
- "title": "MaxSize is the maximum number of entries to keep in the in-memory database used to store recent UIDs.\nLarger number mean bigger windows of time for dedupe, but greater memory usage.\n+kubebuilder:default=\"1M\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"
- },
- "uid": {
- "type": "string",
- "title": "+kubebuilder:default=\"sha1(msg)\""
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Expand": {
- "type": "object",
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Filter": {
- "type": "object",
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- },
- "expression": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Flatten": {
- "type": "object",
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Git": {
- "type": "object",
- "properties": {
- "branch": {
- "type": "string",
- "title": "+kubebuilder:default=main"
- },
- "command": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "env": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar"
- }
- },
- "image": {
- "type": "string"
- },
- "insecureIgnoreHostKey": {
- "type": "boolean",
- "title": "InsecureIgnoreHostKey is the bool value for ignoring check for host key"
- },
- "passwordSecret": {
- "title": "PasswordSecret is the secret selector to the repository password",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "path": {
- "description": "+kubebuilder:default=.",
- "type": "string"
- },
- "sshPrivateKeySecret": {
- "title": "SSHPrivateKeySecret is the secret selector to the repository ssh private key",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "url": {
- "type": "string"
- },
- "usernameSecret": {
- "title": "UsernameSecret is the secret selector to the repository username",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Group": {
- "type": "object",
- "properties": {
- "endOfGroup": {
- "type": "string"
- },
- "format": {
- "type": "string"
- },
- "key": {
- "type": "string"
- },
- "storage": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Storage"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTP": {
- "type": "object"
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeader": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "value": {
- "type": "string"
- },
- "valueFrom": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeaderSource"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeaderSource": {
- "type": "object",
- "properties": {
- "secretKeyRef": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSink": {
- "type": "object",
- "properties": {
- "headers": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPHeader"
- }
- },
- "insecureSkipVerify": {
- "type": "boolean"
- },
- "url": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSource": {
- "type": "object",
- "properties": {
- "serviceName": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Interface": {
- "type": "object",
- "properties": {
- "fifo": {
- "type": "boolean"
- },
- "http": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTP"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStream": {
- "type": "object",
- "properties": {
- "auth": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.NATSAuth"
- },
- "name": {
- "type": "string",
- "title": "+kubebuilder:default=default"
- },
- "natsUrl": {
- "type": "string"
- },
- "subject": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStreamSink": {
- "type": "object",
- "properties": {
- "jetstream": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStream"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStreamSource": {
- "type": "object",
- "properties": {
- "jetstream": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStream"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka": {
- "type": "object",
- "properties": {
- "kafkaConfig": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaConfig"
- },
- "name": {
- "type": "string",
- "title": "+kubebuilder:default=default"
- },
- "topic": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaConfig": {
- "type": "object",
- "properties": {
- "brokers": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "maxMessageBytes": {
- "type": "integer"
- },
- "net": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaNET"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaNET": {
- "type": "object",
- "properties": {
- "sasl": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SASL"
- },
- "tls": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.TLS"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSink": {
- "type": "object",
- "properties": {
- "acks": {
- "title": "+kubebuilder:default=\"all\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
- },
- "async": {
- "type": "boolean"
- },
- "batchSize": {
- "title": "+kubebuilder:default=\"100Ki\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"
- },
- "compressionType": {
- "type": "string",
- "title": "+kubebuilder:default=\"lz4\""
- },
- "enableIdempotence": {
- "type": "boolean",
- "title": "+kubebuilder:default=true"
- },
- "kafka": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka"
- },
- "linger": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "maxInflight": {
- "type": "integer",
- "title": "The maximum number of messages to be in-flight when async.\n+kubebuilder:default=20"
- },
- "messageTimeout": {
- "title": "+kubebuilder:default=\"30s\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSource": {
- "type": "object",
- "properties": {
- "fetchMin": {
- "title": "+kubebuilder:default=\"100Ki\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"
- },
- "fetchWaitMax": {
- "title": "+kubebuilder:default=\"500ms\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "groupId": {
- "description": "GroupID is the consumer group ID. If not specified, a unique deterministic group ID is generated.",
- "type": "string"
- },
- "kafka": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Kafka"
- },
- "startOffset": {
- "type": "string",
- "title": "+kubebuilder:default=Last"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Log": {
- "type": "object",
- "properties": {
- "truncate": {
- "type": "string",
- "format": "uint64"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Map": {
- "type": "object",
- "properties": {
- "abstractStep": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractStep"
- },
- "expression": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metadata": {
- "type": "object",
- "properties": {
- "annotations": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "labels": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.NATSAuth": {
- "type": "object",
- "properties": {
- "token": {
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline": {
- "type": "object",
- "title": "+kubebuilder:object:root=true\n+kubebuilder:resource:shortName=pl\n+kubebuilder:subresource:status\n+kubebuilder:printcolumn:name=\"Phase\",type=string,JSONPath=`.status.phase`\n+kubebuilder:printcolumn:name=\"Message\",type=string,JSONPath=`.status.message`",
- "properties": {
- "metadata": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
- },
- "spec": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineSpec"
- },
- "status": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineStatus"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineList": {
- "type": "object",
- "properties": {
- "items": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline"
- }
- },
- "metadata": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineSpec": {
- "type": "object",
- "properties": {
- "deletionDelay": {
- "title": "+kubebuilder:default=\"72h\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "steps": {
- "type": "array",
- "title": "+patchStrategy=merge\n+patchMergeKey=name",
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec"
- }
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.PipelineStatus": {
- "type": "object",
- "properties": {
- "conditions": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition"
- }
- },
- "lastUpdated": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
- },
- "message": {
- "type": "string"
- },
- "phase": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3": {
- "type": "object",
- "properties": {
- "bucket": {
- "type": "string"
- },
- "credentials": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSCredentials"
- },
- "endpoint": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AWSEndpoint"
- },
- "name": {
- "type": "string",
- "title": "+kubebuilder:default=default"
- },
- "region": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Sink": {
- "type": "object",
- "properties": {
- "s3": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Source": {
- "type": "object",
- "properties": {
- "concurrency": {
- "type": "integer",
- "title": "+kubebuilder:default=1"
- },
- "pollPeriod": {
- "title": "+kubebuilder:default=\"1m\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "s3": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SASL": {
- "type": "object",
- "properties": {
- "mechanism": {
- "type": "string",
- "title": "SASLMechanism is the name of the enabled SASL mechanism.\nPossible values: OAUTHBEARER, PLAIN (defaults to PLAIN).\n+optional"
- },
- "password": {
- "title": "Password for SASL/PLAIN authentication",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "user": {
- "title": "User is the authentication identity (authcid) to present for\nSASL/PLAIN or SASL/SCRAM authentication",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLAction": {
- "type": "object",
- "properties": {
- "onError": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement"
- },
- "onRecordNotFound": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement"
- },
- "statement": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.SQLStatement": {
- "type": "object",
- "properties": {
- "args": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "sql": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN": {
- "type": "object",
- "properties": {
- "auth": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.NATSAuth"
- },
- "clusterId": {
- "type": "string"
- },
- "maxInflight": {
- "type": "integer",
- "title": "Max inflight messages when subscribing to the stan server, which means how many messages\nbetween commits, therefore potential duplicates during disruption\n+kubebuilder:default=20"
- },
- "name": {
- "type": "string",
- "title": "+kubebuilder:default=default"
- },
- "natsMonitoringUrl": {
- "type": "string"
- },
- "natsUrl": {
- "type": "string"
- },
- "subject": {
- "type": "string"
- },
- "subjectPrefix": {
- "type": "string"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Scale": {
- "type": "object",
- "properties": {
- "desiredReplicas": {
- "description": "An expression to determine the number of replicas. Must evaluation to an `int`.",
- "type": "string"
- },
- "peekDelay": {
- "type": "string",
- "title": "An expression to determine the delay for peeking. Maybe string or duration, e.g. `\"4m\"`\n+kubebuilder:default=\"defaultPeekDelay\""
- },
- "scalingDelay": {
- "type": "string",
- "title": "An expression to determine the delay for scaling. Maybe string or duration, e.g. `\"1m\"`\n+kubebuilder:default=\"defaultScalingDelay\""
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sidecar": {
- "type": "object",
- "properties": {
- "resources": {
- "title": "+kubebuilder:default={limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}",
- "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sink": {
- "type": "object",
- "properties": {
- "db": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSink"
- },
- "deadLetterQueue": {
- "type": "boolean"
- },
- "http": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSink"
- },
- "jetstream": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStreamSink"
- },
- "kafka": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSink"
- },
- "log": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Log"
- },
- "name": {
- "type": "string",
- "title": "+kubebuilder:default=default"
- },
- "s3": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Sink"
- },
- "stan": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN"
- },
- "volume": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.VolumeSink"
+ "default": {
+ "description": "An unexpected error response.",
+ "schema": {
+ "$ref": "#/definitions/grpc.gateway.runtime.Error"
+ }
+ }
}
}
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Source": {
+ }
+ },
+ "definitions": {
+ "eventsource.CreateEventSourceRequest": {
"type": "object",
"properties": {
- "cron": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cron"
- },
- "db": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.DBSource"
- },
- "http": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.HTTPSource"
- },
- "jetstream": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.JetStreamSource"
- },
- "kafka": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.KafkaSource"
- },
- "name": {
- "type": "string",
- "title": "+kubebuilder:default=default"
- },
- "retry": {
- "title": "+kubebuilder:default={duration: \"100ms\", steps: 20, factorPercentage: 200, jitterPercentage: 10}",
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Backoff"
- },
- "s3": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.S3Source"
- },
- "stan": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.STAN"
+ "eventSource": {
+ "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource"
},
- "volume": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.VolumeSource"
+ "namespace": {
+ "type": "string"
}
}
},
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step": {
+ "eventsource.EventSourceDeletedResponse": {
+ "type": "object"
+ },
+ "eventsource.EventSourceWatchEvent": {
"type": "object",
- "title": "+kubebuilder:object:root=true\n+kubebuilder:subresource:status\n+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector\n+kubebuilder:printcolumn:name=\"Phase\",type=string,JSONPath=`.status.phase`\n+kubebuilder:printcolumn:name=\"Reason\",type=string,JSONPath=`.status.reason`\n+kubebuilder:printcolumn:name=\"Message\",type=string,JSONPath=`.status.message`\n+kubebuilder:printcolumn:name=\"Desired\",type=string,JSONPath=`.spec.replicas`\n+kubebuilder:printcolumn:name=\"Current\",type=string,JSONPath=`.status.replicas`",
"properties": {
- "metadata": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
- },
- "spec": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec"
+ "object": {
+ "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource"
},
- "status": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepStatus"
+ "type": {
+ "type": "string"
}
}
},
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepSpec": {
+ "eventsource.LogEntry": {
"type": "object",
+ "title": "structured log entry",
"properties": {
- "affinity": {
- "$ref": "#/definitions/io.k8s.api.core.v1.Affinity"
- },
- "cat": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Cat"
- },
- "code": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Code"
- },
- "container": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Container"
- },
- "dedupe": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Dedupe"
- },
- "expand": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Expand"
- },
- "filter": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Filter"
- },
- "flatten": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Flatten"
- },
- "git": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Git"
- },
- "group": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Group"
- },
- "imagePullSecrets": {
- "type": "array",
- "title": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images\nin pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets\ncan be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.\nMore info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\n+patchStrategy=merge\n+patchMergeKey=name",
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference"
- }
- },
- "map": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Map"
- },
- "metadata": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Metadata"
- },
- "name": {
- "type": "string",
- "title": "+kubebuilder:default=default"
- },
- "nodeSelector": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "replicas": {
- "type": "integer",
- "title": "+kubebuilder:default=1"
- },
- "restartPolicy": {
+ "eventName": {
"type": "string",
- "title": "+kubebuilder:default=OnFailure"
+ "title": "optional - the event name (e.g. `example`)"
},
- "scale": {
- "title": "+kubebuilder:default={peekDelay: \"defaultPeekDelay\", scalingDelay: \"defaultScalingDelay\", desiredReplicas: \"\"}",
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Scale"
+ "eventSourceName": {
+ "type": "string"
},
- "serviceAccountName": {
+ "eventSourceType": {
"type": "string",
- "title": "+kubebuilder:default=pipeline"
- },
- "sidecar": {
- "title": "+kubebuilder:default={resources: {limits: {\"cpu\": \"500m\", \"memory\": \"256Mi\"}, requests: {\"cpu\": \"100m\", \"memory\": \"64Mi\"}}}",
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sidecar"
- },
- "sinks": {
- "type": "array",
- "title": "+patchStrategy=merge\n+patchMergeKey=name",
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Sink"
- }
- },
- "sources": {
- "type": "array",
- "title": "+patchStrategy=merge\n+patchMergeKey=name",
- "items": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Source"
- }
- },
- "terminator": {
- "type": "boolean"
- },
- "tolerations": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.Toleration"
- }
- },
- "volumes": {
- "type": "array",
- "title": "+patchStrategy=merge\n+patchMergeKey=name",
- "items": {
- "$ref": "#/definitions/io.k8s.api.core.v1.Volume"
- }
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.StepStatus": {
- "type": "object",
- "properties": {
- "lastScaledAt": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ "title": "optional - the event source type (e.g. `webhook`)"
},
- "message": {
+ "level": {
"type": "string"
},
- "phase": {
+ "msg": {
"type": "string"
},
- "reason": {
+ "namespace": {
"type": "string"
},
- "replicas": {
- "type": "integer"
- },
- "selector": {
- "type": "string"
+ "time": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
}
}
},
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Storage": {
+ "eventsource.UpdateEventSourceRequest": {
"type": "object",
"properties": {
+ "eventSource": {
+ "$ref": "#/definitions/io.argoproj.events.v1alpha1.EventSource"
+ },
"name": {
"type": "string"
},
- "subPath": {
- "type": "string",
- "title": "volume name"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.TLS": {
- "type": "object",
- "properties": {
- "caCertSecret": {
- "title": "CACertSecret refers to the secret that contains the CA cert",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "certSecret": {
- "title": "CertSecret refers to the secret that contains the cert",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- },
- "keySecret": {
- "title": "KeySecret refers to the secret that contains the key",
- "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.VolumeSink": {
- "type": "object",
- "properties": {
- "abstractVolumeSource": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractVolumeSource"
- }
- }
- },
- "github.com.argoproj_labs.argo_dataflow.api.v1alpha1.VolumeSource": {
- "type": "object",
- "properties": {
- "abstractVolumeSource": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.AbstractVolumeSource"
- },
- "concurrency": {
- "type": "integer",
- "title": "+kubebuilder:default=1"
- },
- "pollPeriod": {
- "title": "+kubebuilder:default=\"1m\"",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
- },
- "readOnly": {
- "type": "boolean"
+ "namespace": {
+ "type": "string"
}
}
},
@@ -8841,6 +7245,30 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.ArtGCStatus": {
+ "description": "ArtGCStatus maintains state related to ArtifactGC",
+ "type": "object",
+ "properties": {
+ "notSpecified": {
+ "description": "if this is true, we already checked to see if we need to do it and we don't",
+ "type": "boolean"
+ },
+ "podsRecouped": {
+ "description": "have completed Pods been processed? (mapped by Pod name) used to prevent re-processing the Status of a Pod more than once",
+ "type": "object",
+ "additionalProperties": {
+ "type": "boolean"
+ }
+ },
+ "strategiesProcessed": {
+ "description": "have Pods been started to perform this strategy? (enables us not to re-process what we've already done)",
+ "type": "object",
+ "additionalProperties": {
+ "type": "boolean"
+ }
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.Artifact": {
"description": "Artifact indicates an artifact to place at a specified path",
"type": "object",
@@ -8856,10 +7284,22 @@
"description": "ArchiveLogs indicates if the container logs should be archived",
"type": "boolean"
},
+ "artifactGC": {
+ "description": "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactGC"
+ },
"artifactory": {
"description": "Artifactory contains artifactory artifact location details",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifact"
},
+ "azure": {
+ "description": "Azure contains Azure Storage artifact location details",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.AzureArtifact"
+ },
+ "deleted": {
+ "description": "Has this been deleted?",
+ "type": "boolean"
+ },
"from": {
"description": "From allows an artifact to reference an artifact from a previous step",
"type": "string"
@@ -8926,6 +7366,50 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.ArtifactGC": {
+ "description": "ArtifactGC describes how to delete artifacts from completed Workflows",
+ "type": "object",
+ "properties": {
+ "podMetadata": {
+ "description": "PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Metadata"
+ },
+ "serviceAccountName": {
+ "description": "ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion",
+ "type": "string"
+ },
+ "strategy": {
+ "description": "Strategy is the strategy to use.",
+ "type": "string"
+ }
+ }
+ },
+ "io.argoproj.workflow.v1alpha1.ArtifactGCSpec": {
+ "description": "ArtifactGCSpec specifies the Artifacts that need to be deleted",
+ "type": "object",
+ "properties": {
+ "artifactsByNode": {
+ "description": "ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactNodeSpec"
+ }
+ }
+ }
+ },
+ "io.argoproj.workflow.v1alpha1.ArtifactGCStatus": {
+ "description": "ArtifactGCStatus describes the result of the deletion",
+ "type": "object",
+ "properties": {
+ "artifactResultsByNode": {
+ "description": "ArtifactResultsByNode maps Node name to result",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactResultNodeStatus"
+ }
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.ArtifactLocation": {
"description": "ArtifactLocation describes a location for a single or multiple artifacts. It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). It is also used to describe the location of multiple artifacts such as the archive location of a single workflow step, which the executor will use as a default location to store its files.",
"type": "object",
@@ -8938,6 +7422,10 @@
"description": "Artifactory contains artifactory artifact location details",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifact"
},
+ "azure": {
+ "description": "Azure contains Azure Storage artifact location details",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.AzureArtifact"
+ },
"gcs": {
"description": "GCS contains GCS artifact location details",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GCSArtifact"
@@ -8968,6 +7456,23 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.ArtifactNodeSpec": {
+ "description": "ArtifactNodeSpec specifies the Artifacts that need to be deleted for a given Node",
+ "type": "object",
+ "properties": {
+ "archiveLocation": {
+ "description": "ArchiveLocation is the template-level Artifact location specification",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactLocation"
+ },
+ "artifacts": {
+ "description": "Artifacts maps artifact name to Artifact description",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Artifact"
+ }
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.ArtifactPaths": {
"description": "ArtifactPaths expands a step from a collection of artifacts",
"type": "object",
@@ -8983,10 +7488,22 @@
"description": "ArchiveLogs indicates if the container logs should be archived",
"type": "boolean"
},
+ "artifactGC": {
+ "description": "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactGC"
+ },
"artifactory": {
"description": "Artifactory contains artifactory artifact location details",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifact"
},
+ "azure": {
+ "description": "Azure contains Azure Storage artifact location details",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.AzureArtifact"
+ },
+ "deleted": {
+ "description": "Has this been deleted?",
+ "type": "boolean"
+ },
"from": {
"description": "From allows an artifact to reference an artifact from a previous step",
"type": "string"
@@ -9065,6 +7582,10 @@
"description": "Artifactory stores artifacts to JFrog Artifactory",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactoryArtifactRepository"
},
+ "azure": {
+ "description": "Azure stores artifact in an Azure Storage account",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.AzureArtifactRepository"
+ },
"gcs": {
"description": "GCS stores artifact in a GCS object store",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.GCSArtifactRepository"
@@ -9121,6 +7642,40 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.ArtifactResult": {
+ "description": "ArtifactResult describes the result of attempting to delete a given Artifact",
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "error": {
+ "description": "Error is an optional error message which should be set if Success==false",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name is the name of the Artifact",
+ "type": "string"
+ },
+ "success": {
+ "description": "Success describes whether the deletion succeeded",
+ "type": "boolean"
+ }
+ }
+ },
+ "io.argoproj.workflow.v1alpha1.ArtifactResultNodeStatus": {
+ "description": "ArtifactResultNodeStatus describes the result of the deletion on a given node",
+ "type": "object",
+ "properties": {
+ "artifactResults": {
+ "description": "ArtifactResults maps Artifact name to result of the deletion",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactResult"
+ }
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.ArtifactoryArtifact": {
"description": "ArtifactoryArtifact is the location of an artifactory artifact",
"type": "object",
@@ -9160,6 +7715,67 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.AzureArtifact": {
+ "description": "AzureArtifact is the location of a an Azure Storage artifact",
+ "type": "object",
+ "required": [
+ "endpoint",
+ "container",
+ "blob"
+ ],
+ "properties": {
+ "accountKeySecret": {
+ "description": "AccountKeySecret is the secret selector to the Azure Blob Storage account access key",
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "blob": {
+ "description": "Blob is the blob name (i.e., path) in the container where the artifact resides",
+ "type": "string"
+ },
+ "container": {
+ "description": "Container is the container where resources will be stored",
+ "type": "string"
+ },
+ "endpoint": {
+ "description": "Endpoint is the service url associated with an account. It is most likely \"https://\u003cACCOUNT_NAME\u003e.blob.core.windows.net\"",
+ "type": "string"
+ },
+ "useSDKCreds": {
+ "description": "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.",
+ "type": "boolean"
+ }
+ }
+ },
+ "io.argoproj.workflow.v1alpha1.AzureArtifactRepository": {
+ "description": "AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository",
+ "type": "object",
+ "required": [
+ "endpoint",
+ "container"
+ ],
+ "properties": {
+ "accountKeySecret": {
+ "description": "AccountKeySecret is the secret selector to the Azure Blob Storage account access key",
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "blobNameFormat": {
+ "description": "BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables",
+ "type": "string"
+ },
+ "container": {
+ "description": "Container is the container where resources will be stored",
+ "type": "string"
+ },
+ "endpoint": {
+ "description": "Endpoint is the service url associated with an account. It is most likely \"https://\u003cACCOUNT_NAME\u003e.blob.core.windows.net\"",
+ "type": "string"
+ },
+ "useSDKCreds": {
+ "description": "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.",
+ "type": "boolean"
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.Backoff": {
"description": "Backoff is a backoff strategy to use within retryStrategy",
"type": "object",
@@ -9178,6 +7794,20 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.BasicAuth": {
+ "description": "BasicAuth describes the secret selectors required for basic authentication",
+ "type": "object",
+ "properties": {
+ "passwordSecret": {
+ "description": "PasswordSecret is the secret selector to the repository password",
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "usernameSecret": {
+ "description": "UsernameSecret is the secret selector to the repository username",
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.Cache": {
"description": "Cache is the configuration for the type of cache to be used",
"type": "object",
@@ -9191,6 +7821,18 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.ClientCertAuth": {
+ "description": "ClientCertAuth holds necessary information for client authentication via certificates",
+ "type": "object",
+ "properties": {
+ "clientCertSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "clientKeySecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplate": {
"description": "ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope",
"type": "object",
@@ -9279,6 +7921,17 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.CollectEventRequest": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "io.argoproj.workflow.v1alpha1.CollectEventResponse": {
+ "type": "object"
+ },
"io.argoproj.workflow.v1alpha1.Condition": {
"type": "object",
"properties": {
@@ -9303,14 +7956,14 @@
],
"properties": {
"args": {
- "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"type": "array",
"items": {
"type": "string"
}
},
"command": {
- "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"type": "array",
"items": {
"type": "string"
@@ -9339,7 +7992,7 @@
}
},
"image": {
- "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+ "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
"type": "string"
},
"imagePullPolicy": {
@@ -9909,6 +8562,9 @@
"serviceAccountName": {
"type": "string"
},
+ "serviceAccountNamespace": {
+ "type": "string"
+ },
"subject": {
"type": "string"
}
@@ -9921,6 +8577,10 @@
"repo"
],
"properties": {
+ "branch": {
+ "description": "Branch is the branch to fetch when `SingleBranch` is enabled",
+ "type": "string"
+ },
"depth": {
"description": "Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip",
"type": "integer"
@@ -9952,6 +8612,10 @@
"description": "Revision is the git commit, tag, branch to checkout",
"type": "string"
},
+ "singleBranch": {
+ "description": "SingleBranch enables single branch clone, using the `branch` parameter",
+ "type": "boolean"
+ },
"sshPrivateKeySecret": {
"description": "SSHPrivateKeySecret is the secret selector to the repository ssh private key",
"$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
@@ -10073,6 +8737,10 @@
"description": "Body is content of the HTTP Request",
"type": "string"
},
+ "bodyFrom": {
+ "description": "BodyFrom is content of the HTTP Request as Bytes",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.HTTPBodySource"
+ },
"headers": {
"description": "Headers are an optional list of headers to send with HTTP requests",
"type": "array",
@@ -10081,7 +8749,7 @@
}
},
"insecureSkipVerify": {
- "description": "insecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client",
+ "description": "InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client",
"type": "boolean"
},
"method": {
@@ -10103,12 +8771,16 @@
}
},
"io.argoproj.workflow.v1alpha1.HTTPArtifact": {
- "description": "HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container",
+ "description": "HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container",
"type": "object",
"required": [
"url"
],
"properties": {
+ "auth": {
+ "description": "Auth contains information for client authentication",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.HTTPAuth"
+ },
"headers": {
"description": "Headers are an optional list of headers to send with HTTP requests for artifacts",
"type": "array",
@@ -10122,6 +8794,30 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.HTTPAuth": {
+ "type": "object",
+ "properties": {
+ "basicAuth": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.BasicAuth"
+ },
+ "clientCert": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ClientCertAuth"
+ },
+ "oauth2": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.OAuth2Auth"
+ }
+ }
+ },
+ "io.argoproj.workflow.v1alpha1.HTTPBodySource": {
+ "description": "HTTPBodySource contains the source of the HTTP body.",
+ "type": "object",
+ "properties": {
+ "bytes": {
+ "type": "string",
+ "format": "byte"
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.HTTPHeader": {
"type": "object",
"required": [
@@ -10274,9 +8970,6 @@
},
"io.argoproj.workflow.v1alpha1.LifecycleHook": {
"type": "object",
- "required": [
- "template"
- ],
"properties": {
"arguments": {
"description": "Arguments hold arguments to the template",
@@ -10343,6 +9036,18 @@
}
}
},
+ "io.argoproj.workflow.v1alpha1.ManifestFrom": {
+ "type": "object",
+ "required": [
+ "artifact"
+ ],
+ "properties": {
+ "artifact": {
+ "description": "Artifact contains the artifact to use",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Artifact"
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.MemoizationStatus": {
"description": "MemoizationStatus is the status of this memoized node",
"type": "object",
@@ -10633,6 +9338,50 @@
"description": "NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately.",
"type": "object"
},
+ "io.argoproj.workflow.v1alpha1.OAuth2Auth": {
+ "description": "OAuth2Auth holds all information for client authentication via OAuth2 tokens",
+ "type": "object",
+ "properties": {
+ "clientIDSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "clientSecretSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ },
+ "endpointParams": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.OAuth2EndpointParam"
+ }
+ },
+ "scopes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "tokenURLSecret": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+ }
+ }
+ },
+ "io.argoproj.workflow.v1alpha1.OAuth2EndpointParam": {
+ "description": "EndpointParam is for requesting optional fields that should be sent in the oauth request",
+ "type": "object",
+ "required": [
+ "key"
+ ],
+ "properties": {
+ "key": {
+ "description": "Name is the header name",
+ "type": "string"
+ },
+ "value": {
+ "description": "Value is the literal value to use for the header",
+ "type": "string"
+ }
+ }
+ },
"io.argoproj.workflow.v1alpha1.OSSArtifact": {
"description": "OSSArtifact is the location of an Alibaba Cloud OSS artifact",
"type": "object",
@@ -10902,6 +9651,10 @@
"description": "Manifest contains the kubernetes manifest",
"type": "string"
},
+ "manifestFrom": {
+ "description": "ManifestFrom is the source for a single kubernetes manifest",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ManifestFrom"
+ },
"mergeStrategy": {
"description": "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\" Must be one of: strategic, merge, json",
"type": "string"
@@ -10928,6 +9681,12 @@
"namespace": {
"type": "string"
},
+ "parameters": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
"uid": {
"type": "string"
}
@@ -10954,6 +9713,12 @@
"nodeFieldSelector": {
"type": "string"
},
+ "parameters": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
"restartSuccessful": {
"type": "boolean"
},
@@ -10983,7 +9748,7 @@
"type": "string"
},
"limit": {
- "description": "Limit is the maximum number of attempts when retrying a container",
+ "description": "Limit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be `limit + 1`.",
"$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
},
"retryPolicy": {
@@ -11125,14 +9890,14 @@
],
"properties": {
"args": {
- "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"type": "array",
"items": {
"type": "string"
}
},
"command": {
- "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"type": "array",
"items": {
"type": "string"
@@ -11155,7 +9920,7 @@
}
},
"image": {
- "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+ "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
"type": "string"
},
"imagePullPolicy": {
@@ -11713,14 +10478,14 @@
],
"properties": {
"args": {
- "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"type": "array",
"items": {
"type": "string"
}
},
"command": {
- "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"type": "array",
"items": {
"type": "string"
@@ -11743,7 +10508,7 @@
}
},
"image": {
- "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+ "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
"type": "string"
},
"imagePullPolicy": {
@@ -12121,6 +10886,12 @@
},
"namespace": {
"type": "string"
+ },
+ "parameters": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
}
},
@@ -12150,6 +10921,12 @@
"nodeFieldSelector": {
"type": "string"
},
+ "parameters": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
"restartSuccessful": {
"type": "boolean"
}
@@ -12198,6 +10975,10 @@
"description": "Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{io.argoproj.workflow.v1alpha1.parameters.myparam}}",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Arguments"
},
+ "artifactGC": {
+ "description": "ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactGC"
+ },
"artifactRepositoryRef": {
"description": "ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactRepositoryRef"
@@ -12274,7 +11055,7 @@
"$ref": "#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetSpec"
},
"podGC": {
- "description": "PodGC describes the strategy to use when to deleting completed pods",
+ "description": "PodGC describes the strategy to use when deleting completed pods",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.PodGC"
},
"podMetadata": {
@@ -12282,7 +11063,7 @@
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Metadata"
},
"podPriority": {
- "description": "Priority to apply to workflow pods.",
+ "description": "Priority to apply to workflow pods. DEPRECATED: Use PodPriorityClassName instead.",
"type": "integer"
},
"podPriorityClassName": {
@@ -12352,7 +11133,7 @@
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.TTLStrategy"
},
"volumeClaimGC": {
- "description": "VolumeClaimGC describes the strategy to use when to deleting volumes from completed workflows",
+ "description": "VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.VolumeClaimGC"
},
"volumeClaimTemplates": {
@@ -12374,7 +11155,7 @@
"x-kubernetes-patch-strategy": "merge"
},
"workflowMetadata": {
- "description": "WorkflowMetadata contains some metadata of the workflow to be refer",
+ "description": "WorkflowMetadata contains some metadata of the workflow to refer to",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowMetadata"
},
"workflowTemplateRef": {
@@ -12387,6 +11168,10 @@
"description": "WorkflowStatus contains overall status information about a workflow",
"type": "object",
"properties": {
+ "artifactGCStatus": {
+ "description": "ArtifactGCStatus maintains the status of Artifact Garbage Collection",
+ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtGCStatus"
+ },
"artifactRepositoryRef": {
"description": "ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ArtifactRepositoryRefStatus"
@@ -15308,43 +14093,6 @@
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"type": "string"
},
- "io.k8s.apimachinery.pkg.apis.meta.v1.Condition": {
- "description": "Condition contains details for one aspect of the current state of this API Resource.",
- "type": "object",
- "required": [
- "type",
- "status",
- "lastTransitionTime",
- "reason",
- "message"
- ],
- "properties": {
- "lastTransitionTime": {
- "description": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.",
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
- },
- "message": {
- "description": "message is a human readable message indicating details about the transition. This may be an empty string.",
- "type": "string"
- },
- "observedGeneration": {
- "description": "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.",
- "type": "integer"
- },
- "reason": {
- "description": "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.",
- "type": "string"
- },
- "status": {
- "description": "status of the condition, one of True, False, Unknown.",
- "type": "string"
- },
- "type": {
- "description": "type of condition in CamelCase or in foo.example.com/CamelCase.",
- "type": "string"
- }
- }
- },
"io.k8s.apimachinery.pkg.apis.meta.v1.CreateOptions": {
"description": "CreateOptions may be provided when creating an API object.",
"type": "object",
@@ -15362,16 +14110,7 @@
},
"fieldValidation": {
"type": "string",
- "title": "fieldValidation determines how the server should respond to\nunknown/duplicate fields in the object in the request.\nIntroduced as alpha in 1.23, older servers or servers with the\n`ServerSideFieldValidation` feature disabled will discard valid values\nspecified in this param and not perform any server side field validation.\nValid values are:\n- Ignore: ignores unknown/duplicate fields.\n- Warn: responds with a warning for each\nunknown/duplicate field, but successfully serves the request.\n- Strict: fails the request on unknown/duplicate fields.\n+optional"
- }
- }
- },
- "io.k8s.apimachinery.pkg.apis.meta.v1.Duration": {
- "description": "Duration is a wrapper around time.Duration which supports correct\nmarshaling to YAML and JSON. In particular, it marshals into strings, which\ncan be used as map keys in json.",
- "type": "object",
- "properties": {
- "duration": {
- "type": "string"
+ "title": "fieldValidation instructs the server on how to handle\nobjects in the request (POST/PUT/PATCH) containing unknown\nor duplicate fields, provided that the `ServerSideFieldValidation`\nfeature gate is also enabled. Valid values are:\n- Ignore: This will ignore any unknown fields that are silently\ndropped from the object, and will ignore all but the last duplicate\nfield that the decoder encounters. This is the default behavior\nprior to v1.23 and is the default behavior when the\n`ServerSideFieldValidation` feature gate is disabled.\n- Warn: This will send a warning via the standard warning response\nheader for each unknown field that is dropped from the object, and\nfor each duplicate field that is encountered. The request will\nstill succeed if there are no other errors, and will only persist\nthe last of any duplicate fields. This is the default when the\n`ServerSideFieldValidation` feature gate is enabled.\n- Strict: This will fail the request with a BadRequest error if\nany unknown fields would be dropped from the object, or if any\nduplicate fields are present. The error returned from the server\nwill contain all unknown and duplicate fields encountered.\n+optional"
}
}
},
@@ -15655,55 +14394,6 @@
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": {
"type": "string"
},
- "pipeline.DeletePipelineResponse": {
- "type": "object"
- },
- "pipeline.LogEntry": {
- "type": "object",
- "title": "structured log entry",
- "properties": {
- "msg": {
- "type": "string"
- },
- "namespace": {
- "type": "string"
- },
- "pipelineName": {
- "type": "string"
- },
- "stepName": {
- "type": "string"
- },
- "time": {
- "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
- }
- }
- },
- "pipeline.PipelineWatchEvent": {
- "type": "object",
- "properties": {
- "object": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Pipeline"
- },
- "type": {
- "type": "string"
- }
- }
- },
- "pipeline.RestartPipelineResponse": {
- "type": "object"
- },
- "pipeline.StepWatchEvent": {
- "type": "object",
- "properties": {
- "object": {
- "$ref": "#/definitions/github.com.argoproj_labs.argo_dataflow.api.v1alpha1.Step"
- },
- "type": {
- "type": "string"
- }
- }
- },
"sensor.CreateSensorRequest": {
"type": "object",
"properties": {
@@ -15784,12 +14474,13 @@
"BearerToken": {
"description": "Bearer Token authentication",
"type": "apiKey",
- "name": "authorization",
+ "name": "Authorization",
"in": "header"
- },
- "HTTPBasic": {
- "description": "HTTP Basic authentication",
- "type": "basic"
}
- }
+ },
+ "security": [
+ {
+ "BearerToken": []
+ }
+ ]
}
\ No newline at end of file
diff --git a/cmd/argo/commands/archive/resubmit.go b/cmd/argo/commands/archive/resubmit.go
index dae6d6d92641..94408a1a48b2 100644
--- a/cmd/argo/commands/archive/resubmit.go
+++ b/cmd/argo/commands/archive/resubmit.go
@@ -45,27 +45,27 @@ func NewResubmitCommand() *cobra.Command {
# Resubmit multiple workflows:
- argo resubmit uid another-uid
+ argo archive resubmit uid another-uid
# Resubmit multiple workflows by label selector:
- argo resubmit -l workflows.argoproj.io/test=true
+ argo archive resubmit -l workflows.argoproj.io/test=true
# Resubmit multiple workflows by field selector:
- argo resubmit --field-selector metadata.namespace=argo
+ argo archive resubmit --field-selector metadata.namespace=argo
# Resubmit and wait for completion:
- argo resubmit --wait uid
+ argo archive resubmit --wait uid
# Resubmit and watch until completion:
- argo resubmit --watch uid
+ argo archive resubmit --watch uid
# Resubmit and tail logs until completion:
- argo resubmit --log uid
+ argo archive resubmit --log uid
`,
Run: func(cmd *cobra.Command, args []string) {
if cmd.Flag("priority").Changed {
@@ -82,6 +82,7 @@ func NewResubmitCommand() *cobra.Command {
},
}
+ command.Flags().StringArrayVarP(&cliSubmitOpts.Parameters, "parameter", "p", []string{}, "input parameter to override on the original workflow spec")
command.Flags().Int32Var(&resubmitOpts.priority, "priority", 0, "workflow priority")
command.Flags().StringVarP(&cliSubmitOpts.Output, "output", "o", "", "Output format. One of: name|json|yaml|wide")
command.Flags().BoolVarP(&cliSubmitOpts.Wait, "wait", "w", false, "wait for the workflow to complete, only works when a single workflow is resubmitted")
@@ -127,10 +128,11 @@ func resubmitArchivedWorkflows(ctx context.Context, archiveServiceClient workflo
resubmittedUids[string(wf.UID)] = true
lastResubmitted, err = archiveServiceClient.ResubmitArchivedWorkflow(ctx, &workflowarchivepkg.ResubmitArchivedWorkflowRequest{
- Uid: string(wf.UID),
- Namespace: wf.Namespace,
- Name: wf.Name,
- Memoized: resubmitOpts.memoized,
+ Uid: string(wf.UID),
+ Namespace: wf.Namespace,
+ Name: wf.Name,
+ Memoized: resubmitOpts.memoized,
+ Parameters: cliSubmitOpts.Parameters,
})
if err != nil {
return err
diff --git a/cmd/argo/commands/archive/retry.go b/cmd/argo/commands/archive/retry.go
new file mode 100644
index 000000000000..9a9171f3905a
--- /dev/null
+++ b/cmd/argo/commands/archive/retry.go
@@ -0,0 +1,152 @@
+package archive
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/argoproj/pkg/errors"
+ "github.com/spf13/cobra"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/types"
+
+ client "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
+ "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
+ workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
+ workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
+ wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+)
+
+type retryOps struct {
+ nodeFieldSelector string // --node-field-selector
+ restartSuccessful bool // --restart-successful
+ namespace string // --namespace
+ labelSelector string // --selector
+ fieldSelector string // --field-selector
+}
+
+// hasSelector returns true if the CLI arguments select multiple workflows
+func (o *retryOps) hasSelector() bool {
+ if o.labelSelector != "" || o.fieldSelector != "" {
+ return true
+ }
+ return false
+}
+
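+// NewRetryCommand returns a cobra command that retries one or more archived workflows, selected by UID or by label/field selector.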
+func NewRetryCommand() *cobra.Command {
+ var (
+ cliSubmitOpts common.CliSubmitOpts
+ retryOpts retryOps
+ )
+ command := &cobra.Command{
+ Use: "retry [WORKFLOW...]",
+ Short: "retry zero or more workflows",
+ Example: `# Retry a workflow:
+
+ argo archive retry uid
+
+# Retry multiple workflows:
+
+ argo archive retry uid another-uid
+
+# Retry multiple workflows by label selector:
+
+ argo archive retry -l workflows.argoproj.io/test=true
+
+# Retry multiple workflows by field selector:
+
+ argo archive retry --field-selector metadata.namespace=argo
+
+# Retry and wait for completion:
+
+ argo archive retry --wait uid
+
+# Retry and watch until completion:
+
+ argo archive retry --watch uid
+
+# Retry and tail logs until completion:
+
+ argo archive retry --log uid
+`,
+ Run: func(cmd *cobra.Command, args []string) {
+ if len(args) == 0 && !retryOpts.hasSelector() {
+ cmd.HelpFunc()(cmd, args)
+ os.Exit(1)
+ }
+
+ ctx, apiClient := client.NewAPIClient(cmd.Context())
+ serviceClient := apiClient.NewWorkflowServiceClient()
+ archiveServiceClient, err := apiClient.NewArchivedWorkflowServiceClient()
+ errors.CheckError(err)
+ retryOpts.namespace = client.Namespace()
+
+ err = retryArchivedWorkflows(ctx, archiveServiceClient, serviceClient, retryOpts, cliSubmitOpts, args)
+ errors.CheckError(err)
+ },
+ }
+
+ command.Flags().StringArrayVarP(&cliSubmitOpts.Parameters, "parameter", "p", []string{}, "input parameter to override on the original workflow spec")
+ command.Flags().StringVarP(&cliSubmitOpts.Output, "output", "o", "", "Output format. One of: name|json|yaml|wide")
+ command.Flags().BoolVarP(&cliSubmitOpts.Wait, "wait", "w", false, "wait for the workflow to complete, only works when a single workflow is retried")
+ command.Flags().BoolVar(&cliSubmitOpts.Watch, "watch", false, "watch the workflow until it completes, only works when a single workflow is retried")
+ command.Flags().BoolVar(&cliSubmitOpts.Log, "log", false, "log the workflow until it completes")
+ command.Flags().BoolVar(&retryOpts.restartSuccessful, "restart-successful", false, "indicates to restart successful nodes matching the --node-field-selector")
+ command.Flags().StringVar(&retryOpts.nodeFieldSelector, "node-field-selector", "", "selector of nodes to reset, eg: --node-field-selector inputs.parameters.myparam.value=abc")
+ command.Flags().StringVarP(&retryOpts.labelSelector, "selector", "l", "", "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
+ command.Flags().StringVar(&retryOpts.fieldSelector, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
+ return command
+}
+
+// retryArchivedWorkflows retries archived workflows by the given selector or workflow UIDs
+func retryArchivedWorkflows(ctx context.Context, archiveServiceClient workflowarchivepkg.ArchivedWorkflowServiceClient, serviceClient workflowpkg.WorkflowServiceClient, retryOpts retryOps, cliSubmitOpts common.CliSubmitOpts, args []string) error {
+ selector, err := fields.ParseSelector(retryOpts.nodeFieldSelector)
+ if err != nil {
+ return fmt.Errorf("unable to parse node field selector '%s': %s", retryOpts.nodeFieldSelector, err)
+ }
+ var wfs wfv1.Workflows
+ if retryOpts.hasSelector() {
+ wfs, err = listArchivedWorkflows(ctx, archiveServiceClient, retryOpts.fieldSelector, retryOpts.labelSelector, 0)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, uid := range args {
+ wfs = append(wfs, wfv1.Workflow{
+ ObjectMeta: metav1.ObjectMeta{
+ UID: types.UID(uid),
+ Namespace: retryOpts.namespace,
+ },
+ })
+ }
+
+ var lastRetried *wfv1.Workflow
+ retriedUids := make(map[string]bool)
+ for _, wf := range wfs {
+ if _, ok := retriedUids[string(wf.UID)]; ok {
+ // de-duplication in case there is an overlap between the selector and given workflow names
+ continue
+ }
+ retriedUids[string(wf.UID)] = true
+
+ lastRetried, err = archiveServiceClient.RetryArchivedWorkflow(ctx, &workflowarchivepkg.RetryArchivedWorkflowRequest{
+ Uid: string(wf.UID),
+ Namespace: wf.Namespace,
+ Name: wf.Name,
+ RestartSuccessful: retryOpts.restartSuccessful,
+ NodeFieldSelector: selector.String(),
+ Parameters: cliSubmitOpts.Parameters,
+ })
+ if err != nil {
+ return err
+ }
+ printWorkflow(lastRetried, cliSubmitOpts.Output)
+ }
+ if len(retriedUids) == 1 {
+ // watch or wait when there is only one workflow retried
+ common.WaitWatchOrLog(ctx, serviceClient, lastRetried.Namespace, []string{lastRetried.Name}, cliSubmitOpts)
+ }
+ return nil
+}
diff --git a/cmd/argo/commands/archive/root.go b/cmd/argo/commands/archive/root.go
index 06caf67ede5e..bf147daefd65 100644
--- a/cmd/argo/commands/archive/root.go
+++ b/cmd/argo/commands/archive/root.go
@@ -19,5 +19,6 @@ func NewArchiveCommand() *cobra.Command {
command.AddCommand(NewListLabelKeyCommand())
command.AddCommand(NewListLabelValueCommand())
command.AddCommand(NewResubmitCommand())
+ command.AddCommand(NewRetryCommand())
return command
}
diff --git a/cmd/argo/commands/client/conn.go b/cmd/argo/commands/client/conn.go
index 5b710ab95594..5f28d1d8268d 100644
--- a/cmd/argo/commands/client/conn.go
+++ b/cmd/argo/commands/client/conn.go
@@ -16,7 +16,7 @@ import (
)
var (
- argoServerOpts = apiclient.ArgoServerOpts{}
+ ArgoServerOpts = apiclient.ArgoServerOpts{}
instanceID string
)
@@ -43,20 +43,20 @@ func GetConfig() clientcmd.ClientConfig {
func AddAPIClientFlagsToCmd(cmd *cobra.Command) {
cmd.PersistentFlags().StringVar(&instanceID, "instanceid", os.Getenv("ARGO_INSTANCEID"), "submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.")
// "-s" like kubectl
- cmd.PersistentFlags().StringVarP(&argoServerOpts.URL, "argo-server", "s", os.Getenv("ARGO_SERVER"), "API server `host:port`. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.")
- cmd.PersistentFlags().StringVar(&argoServerOpts.Path, "argo-base-href", os.Getenv("ARGO_BASE_HREF"), "An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.")
- cmd.PersistentFlags().BoolVar(&argoServerOpts.HTTP1, "argo-http1", os.Getenv("ARGO_HTTP1") == "true", "If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.")
- cmd.PersistentFlags().StringSliceVarP(&argoServerOpts.Headers, "header", "H", []string{}, "Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.")
+ cmd.PersistentFlags().StringVarP(&ArgoServerOpts.URL, "argo-server", "s", os.Getenv("ARGO_SERVER"), "API server `host:port`. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.")
+ cmd.PersistentFlags().StringVar(&ArgoServerOpts.Path, "argo-base-href", os.Getenv("ARGO_BASE_HREF"), "A path to use with the HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.")
+ cmd.PersistentFlags().BoolVar(&ArgoServerOpts.HTTP1, "argo-http1", os.Getenv("ARGO_HTTP1") == "true", "If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.")
+ cmd.PersistentFlags().StringSliceVarP(&ArgoServerOpts.Headers, "header", "H", []string{}, "Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.")
// "-e" for encrypted - like zip
- cmd.PersistentFlags().BoolVarP(&argoServerOpts.Secure, "secure", "e", os.Getenv("ARGO_SECURE") != "false", "Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable.")
+ cmd.PersistentFlags().BoolVarP(&ArgoServerOpts.Secure, "secure", "e", os.Getenv("ARGO_SECURE") != "false", "Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable.")
// "-k" like curl
- cmd.PersistentFlags().BoolVarP(&argoServerOpts.InsecureSkipVerify, "insecure-skip-verify", "k", os.Getenv("ARGO_INSECURE_SKIP_VERIFY") == "true", "If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.")
+ cmd.PersistentFlags().BoolVarP(&ArgoServerOpts.InsecureSkipVerify, "insecure-skip-verify", "k", os.Getenv("ARGO_INSECURE_SKIP_VERIFY") == "true", "If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.")
}
func NewAPIClient(ctx context.Context) (context.Context, apiclient.Client) {
ctx, client, err := apiclient.NewClientFromOpts(
apiclient.Opts{
- ArgoServerOpts: argoServerOpts,
+ ArgoServerOpts: ArgoServerOpts,
InstanceID: instanceID,
AuthSupplier: func() string {
return GetAuthString()
diff --git a/cmd/argo/commands/common/get.go b/cmd/argo/commands/common/get.go
index f548f8918e0d..47405f0014b2 100644
--- a/cmd/argo/commands/common/get.go
+++ b/cmd/argo/commands/common/get.go
@@ -118,6 +118,8 @@ func PrintWorkflowHelper(wf *wfv1.Workflow, getArgs GetFlags) string {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.S3.String())
} else if art.Artifactory != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.Artifactory.String())
+ } else if art.Azure != nil {
+ out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.Azure.String())
}
}
}
@@ -164,7 +166,6 @@ func PrintWorkflowHelper(wf *wfv1.Workflow, getArgs GetFlags) string {
}
}
writerBuffer := new(bytes.Buffer)
- printer.PrintSecurityNudges(*wf, writerBuffer)
out += writerBuffer.String()
return out
}
diff --git a/cmd/argo/commands/common/get_test.go b/cmd/argo/commands/common/get_test.go
index b55c512373c7..42652d5bec98 100644
--- a/cmd/argo/commands/common/get_test.go
+++ b/cmd/argo/commands/common/get_test.go
@@ -3,6 +3,7 @@ package common
import (
"bytes"
"fmt"
+ "hash/fnv"
"testing"
"text/tabwriter"
"time"
@@ -15,12 +16,32 @@ import (
"github.com/argoproj/argo-workflows/v3/workflow/util"
)
+var (
+ workflowName string = "testWF"
+)
+
+func init() {
+ // these values get used as part of determining node name and would normally be set as part of
+ // running the application
+ JobStatusIconMap = map[wfv1.NodePhase]string{
+ wfv1.NodePending: ansiFormat("Pending", FgYellow),
+ wfv1.NodeRunning: ansiFormat("Running", FgCyan),
+ wfv1.NodeSucceeded: ansiFormat("Succeeded", FgGreen),
+ wfv1.NodeSkipped: ansiFormat("Skipped", FgDefault),
+ wfv1.NodeFailed: ansiFormat("Failed", FgRed),
+ wfv1.NodeError: ansiFormat("Error", FgRed),
+ }
+ NodeTypeIconMap = map[wfv1.NodeType]string{
+ wfv1.NodeTypeSuspend: ansiFormat("Suspend", FgCyan),
+ }
+}
+
func testPrintNodeImpl(t *testing.T, expected string, node wfv1.NodeStatus, getArgs GetFlags) {
var result bytes.Buffer
w := tabwriter.NewWriter(&result, 0, 8, 1, '\t', 0)
filtered, _ := filterNode(node, getArgs)
if !filtered {
- printNode(w, node, "testWf", "", getArgs, util.GetPodNameVersion())
+ printNode(w, node, workflowName, "", getArgs, util.GetPodNameVersion())
}
err := w.Flush()
assert.NoError(t, err)
@@ -51,19 +72,24 @@ func TestPrintNode(t *testing.T) {
FinishedAt: timestamp,
Message: nodeMessage,
}
+
node.HostNodeName = kubernetesNodeName
- testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, "", nodeID, "0s", nodeMessage, ""), node, getArgs)
+ // derive expected pod name:
+ h := fnv.New32a()
+ _, _ = h.Write([]byte(fmt.Sprintf("%s %s", JobStatusIconMap[wfv1.NodeRunning], nodeName)))
+ expectedPodName := fmt.Sprintf("%s-%s-%v", workflowName, node.TemplateName, h.Sum32())
+ testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, "", expectedPodName, "0s", nodeMessage, ""), node, getArgs)
// Compatibility test
getArgs.Status = "Running"
- testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, getArgs)
+ testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, expectedPodName, "0s", nodeMessage), node, getArgs)
getArgs.Status = ""
getArgs.NodeFieldSelectorString = "phase=Running"
- testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, getArgs)
+ testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, expectedPodName, "0s", nodeMessage), node, getArgs)
getArgs.NodeFieldSelectorString = "phase!=foobar"
- testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, getArgs)
+ testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, expectedPodName, "0s", nodeMessage), node, getArgs)
getArgs.NodeFieldSelectorString = "phase!=Running"
testPrintNodeImpl(t, "", node, getArgs)
@@ -82,7 +108,8 @@ func TestPrintNode(t *testing.T) {
}
node.TemplateName = nodeTemplateName
- testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, nodeID, "0s", nodeMessage, ""), node, getArgs)
+ expectedPodName = fmt.Sprintf("%s-%s-%v", workflowName, node.TemplateName, h.Sum32())
+ testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, expectedPodName, "0s", nodeMessage, ""), node, getArgs)
node.Type = wfv1.NodeTypeSuspend
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", NodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateName, "", "", nodeMessage, ""), node, getArgs)
@@ -91,16 +118,18 @@ func TestPrintNode(t *testing.T) {
Name: nodeTemplateRefName,
Template: nodeTemplateRefName,
}
+ templateName := fmt.Sprintf("%s/%s", node.TemplateRef.Name, node.TemplateRef.Template)
+ expectedPodName = fmt.Sprintf("%s-%s-%v", workflowName, templateName, h.Sum32())
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\n", NodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, "", "", nodeMessage, ""), node, getArgs)
getArgs.Output = "wide"
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t\n", NodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, "", "", getArtifactsString(node), nodeMessage, ""), node, getArgs)
node.Type = wfv1.NodeTypePod
- testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateRefName, nodeTemplateRefName, nodeID, "0s", getArtifactsString(node), nodeMessage, "", kubernetesNodeName), node, getArgs)
+ testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateRefName, nodeTemplateRefName, expectedPodName, "0s", getArtifactsString(node), nodeMessage, "", kubernetesNodeName), node, getArgs)
getArgs.Output = "short"
- testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\n", NodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, nodeID, "0s", nodeMessage, kubernetesNodeName), node, getArgs)
+ testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateRefName, nodeTemplateRefName, expectedPodName, "0s", nodeMessage, kubernetesNodeName), node, getArgs)
getArgs.Status = "foobar"
testPrintNodeImpl(t, "", node, getArgs)
@@ -213,6 +242,7 @@ status:
finishedAt: "2020-06-02T16:04:42Z"
id: many-items-z26lj-753834747
name: many-items-z26lj[0].sleep(8:eight)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
@@ -222,6 +252,7 @@ status:
finishedAt: "2020-06-02T16:04:45Z"
id: many-items-z26lj-1052882686
name: many-items-z26lj[0].sleep(10:ten)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:22Z"
templateName: sleep
type: Pod
@@ -255,6 +286,7 @@ status:
finishedAt: "2020-06-02T16:04:54Z"
id: many-items-z26lj-1774150289
name: many-items-z26lj[0].sleep(3:three)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
@@ -264,6 +296,7 @@ status:
finishedAt: "2020-06-02T16:04:48Z"
id: many-items-z26lj-1939921510
name: many-items-z26lj[0].sleep(0:zero)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
@@ -283,6 +316,7 @@ status:
finishedAt: "2020-06-02T16:04:53Z"
id: many-items-z26lj-2156977535
name: many-items-z26lj[0].sleep(1:one)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
@@ -292,6 +326,7 @@ status:
finishedAt: "2020-06-02T16:04:40Z"
id: many-items-z26lj-2619926859
name: many-items-z26lj[0].sleep(9:nine)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
@@ -301,6 +336,7 @@ status:
finishedAt: "2020-06-02T16:04:44Z"
id: many-items-z26lj-3011405271
name: many-items-z26lj[0].sleep(11:eleven)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:22Z"
templateName: sleep
type: Pod
@@ -310,6 +346,7 @@ status:
finishedAt: "2020-06-02T16:04:57Z"
id: many-items-z26lj-3031375822
name: many-items-z26lj[0].sleep(7:seven)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
@@ -319,6 +356,7 @@ status:
finishedAt: "2020-06-02T16:04:59Z"
id: many-items-z26lj-3126938806
name: many-items-z26lj[0].sleep(12:twelve)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:22Z"
templateName: sleep
type: Pod
@@ -328,6 +366,7 @@ status:
finishedAt: "2020-06-02T16:04:56Z"
id: many-items-z26lj-3178865096
name: many-items-z26lj[0].sleep(6:six)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
@@ -337,6 +376,7 @@ status:
finishedAt: "2020-06-02T16:04:51Z"
id: many-items-z26lj-3409403178
name: many-items-z26lj[0].sleep(2:two)
+ phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
@@ -353,14 +393,24 @@ status:
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
`, &wf)
+
output := PrintWorkflowHelper(&wf, GetFlags{})
- assert.Contains(t, output, `
- ├─ sleep(9:nine) sleep many-items-z26lj-2619926859 19s
- ├─ sleep(10:ten) sleep many-items-z26lj-1052882686 23s
- ├─ sleep(11:eleven) sleep many-items-z26lj-3011405271 22s`)
- assert.Contains(t, output, "This workflow does not have security context set. "+
- "You can run your workflow pods more securely by setting it.\n"+
- "Learn more at https://argoproj.github.io/argo-workflows/workflow-pod-security-context/\n")
+
+ // derive expected pod name:
+ h := fnv.New32a()
+ _, _ = h.Write([]byte(fmt.Sprintf("%s %s", JobStatusIconMap[wfv1.NodeSucceeded], "sleep(9:nine)")))
+ expectedPodName := fmt.Sprintf("many-items-z26lj-sleep-%v", h.Sum32())
+ assert.Contains(t, output, fmt.Sprintf("sleep(9:nine) sleep %s 19s", expectedPodName))
+
+ h.Reset()
+ _, _ = h.Write([]byte(fmt.Sprintf("%s %s", JobStatusIconMap[wfv1.NodeSucceeded], "sleep(10:ten)")))
+ expectedPodName = fmt.Sprintf("many-items-z26lj-sleep-%v", h.Sum32())
+ assert.Contains(t, output, fmt.Sprintf("sleep(10:ten) sleep %s 23s", expectedPodName))
+
+ h.Reset()
+ _, _ = h.Write([]byte(fmt.Sprintf("%s %s", JobStatusIconMap[wfv1.NodeSucceeded], "sleep(11:eleven)")))
+ expectedPodName = fmt.Sprintf("many-items-z26lj-sleep-%v", h.Sum32())
+ assert.Contains(t, output, fmt.Sprintf("sleep(11:eleven) sleep %s 22s", expectedPodName))
})
}
@@ -383,8 +433,4 @@ func Test_printWorkflowHelperNudges(t *testing.T) {
output := PrintWorkflowHelper(&securedWf, GetFlags{})
assert.NotContains(t, output, securityNudges)
})
- t.Run("InsecureWorkflow", func(t *testing.T) {
- output := PrintWorkflowHelper(&insecureWf, GetFlags{})
- assert.Contains(t, output, securityNudges)
- })
}
diff --git a/cmd/argo/commands/common/submit.go b/cmd/argo/commands/common/submit.go
index 77687b9467c8..60b3f43ede29 100644
--- a/cmd/argo/commands/common/submit.go
+++ b/cmd/argo/commands/common/submit.go
@@ -18,7 +18,8 @@ type CliSubmitOpts struct {
Strict bool // --strict
Priority *int32 // --priority
GetArgs GetFlags
- ScheduledTime string // --scheduled-time
+ ScheduledTime string // --scheduled-time
+ Parameters []string // --parameter
}
func WaitWatchOrLog(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, workflowNames []string, cliSubmitOpts CliSubmitOpts) {
diff --git a/cmd/argo/commands/common/wait.go b/cmd/argo/commands/common/wait.go
index 9e872c6129bf..69f7c6b46d47 100644
--- a/cmd/argo/commands/common/wait.go
+++ b/cmd/argo/commands/common/wait.go
@@ -69,7 +69,7 @@ func waitOnOne(serviceClient workflowpkg.WorkflowServiceClient, ctx context.Cont
continue
}
wf := event.Object
- if !wf.Status.FinishedAt.IsZero() {
+ if wf != nil && !wf.Status.FinishedAt.IsZero() {
if !quiet {
fmt.Printf("%s %s at %v\n", wfName, wf.Status.Phase, wf.Status.FinishedAt)
}
diff --git a/cmd/argo/commands/cp.go b/cmd/argo/commands/cp.go
new file mode 100644
index 000000000000..d175a032b285
--- /dev/null
+++ b/cmd/argo/commands/cp.go
@@ -0,0 +1,140 @@
+package commands
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
+ "github.com/argoproj/argo-workflows/v3/pkg/apiclient"
+ workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+)
+
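+// NewCpCommand returns a command that copies a workflow's output artifacts from the Argo Server to a local directory.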
+func NewCpCommand() *cobra.Command {
+ var (
+ namespace string // --namespace
+ nodeId string // --node-id
+ templateName string // --template-name
+ artifactName string // --artifact-name
+ customPath string // --path
+ )
+ command := &cobra.Command{
+ Use: "cp my-wf output-directory ...",
+ Short: "copy artifacts from workflow",
+ Example: `# Copy a workflow's artifacts to a local output directory:
+
+ argo cp my-wf output-directory
+
+# Copy artifacts from a specific node in a workflow to a local output directory:
+
+ argo cp my-wf output-directory --node-id=my-wf-node-id-123
+`,
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if len(args) != 2 {
+ cmd.HelpFunc()(cmd, args)
+ return fmt.Errorf("incorrect number of arguments")
+ }
+ workflowName := args[0]
+ outputDir := args[1]
+
+ ctx, apiClient := client.NewAPIClient(cmd.Context())
+ serviceClient := apiClient.NewWorkflowServiceClient()
+ if len(namespace) == 0 {
+ namespace = client.Namespace()
+ }
+ workflow, err := serviceClient.GetWorkflow(ctx, &workflowpkg.WorkflowGetRequest{
+ Name: workflowName,
+ Namespace: namespace,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to get workflow: %w", err)
+ }
+
+ workflowName = workflow.Name
+ artifactSearchQuery := v1alpha1.ArtifactSearchQuery{
+ ArtifactName: artifactName,
+ TemplateName: templateName,
+ NodeId: nodeId,
+ }
+ artifactSearchResults := workflow.SearchArtifacts(&artifactSearchQuery)
+
+ c := &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: client.ArgoServerOpts.InsecureSkipVerify,
+ },
+ },
+ }
+
+ for _, artifact := range artifactSearchResults {
+ customPath := filepath.Join(outputDir, customPath)
+ nodeInfo := workflow.Status.Nodes.Find(func(n v1alpha1.NodeStatus) bool { return n.ID == artifact.NodeID })
+ if nodeInfo == nil {
+ return fmt.Errorf("could not get node status for node ID %s", artifact.NodeID)
+ }
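+ // expand the path template placeholders ({templateName}, {namespace}, {workflowName}, {nodeId}, {artifactName}) for this artifact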
+ customPath = strings.Replace(customPath, "{templateName}", nodeInfo.TemplateName, 1)
+ customPath = strings.Replace(customPath, "{namespace}", namespace, 1)
+ customPath = strings.Replace(customPath, "{workflowName}", workflowName, 1)
+ customPath = strings.Replace(customPath, "{nodeId}", artifact.NodeID, 1)
+ customPath = strings.Replace(customPath, "{artifactName}", artifact.Name, 1)
+ err = os.MkdirAll(customPath, os.ModePerm)
+ if err != nil {
+ return fmt.Errorf("failed to create folder path: %w", err)
+ }
+ key, err := artifact.GetKey()
+ if err != nil {
+ return fmt.Errorf("error getting key for artifact: %w", err)
+ }
+ err = getAndStoreArtifactData(namespace, workflowName, artifact.NodeID, artifact.Name, path.Base(key), customPath, c, client.ArgoServerOpts)
+ if err != nil {
+ return fmt.Errorf("failed to get and store artifact data: %w", err)
+ }
+ }
+ return nil
+ },
+ }
+ command.Flags().StringVarP(&namespace, "namespace", "n", "", "namespace of workflow")
+ command.Flags().StringVar(&nodeId, "node-id", "", "id of node in workflow")
+ command.Flags().StringVar(&templateName, "template-name", "", "name of template in workflow")
+ command.Flags().StringVar(&artifactName, "artifact-name", "", "name of output artifact in workflow")
+ command.Flags().StringVar(&customPath, "path", "{namespace}/{workflowName}/{nodeId}/outputs/{artifactName}", "use variables {workflowName}, {nodeId}, {templateName}, {artifactName}, and {namespace} to create a customized path to store the artifacts; example: {workflowName}/{templateName}/{artifactName}")
+ return command
+}
+
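+// getAndStoreArtifactData downloads a single artifact from the Argo Server artifacts endpoint and writes it to a file under customPath.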
+func getAndStoreArtifactData(namespace string, workflowName string, nodeId string, artifactName string, fileName string, customPath string, c *http.Client, argoServerOpts apiclient.ArgoServerOpts) error {
+ request, err := http.NewRequest("GET", fmt.Sprintf("%s/artifacts/%s/%s/%s/%s", argoServerOpts.GetURL(), namespace, workflowName, nodeId, artifactName), nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+ request.Header.Set("Authorization", client.GetAuthString())
+ resp, err := c.Do(request)
+ if err != nil {
+ return fmt.Errorf("request failed with: %w", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("request failed %s", resp.Status)
+ }
+ artifactFilePath := filepath.Join(customPath, fileName)
+ fileWriter, err := os.Create(artifactFilePath)
+ if err != nil {
+ return fmt.Errorf("creating file failed: %w", err)
+ }
+ defer fileWriter.Close()
+ _, err = io.Copy(fileWriter, resp.Body)
+ if err != nil {
+ return fmt.Errorf("copying file contents failed: %w", err)
+ }
+ log.Printf("Created %q", fileName)
+ return nil
+}
diff --git a/cmd/argo/commands/delete.go b/cmd/argo/commands/delete.go
index b8f663fa02c9..8c947697224e 100644
--- a/cmd/argo/commands/delete.go
+++ b/cmd/argo/commands/delete.go
@@ -22,9 +22,10 @@ func NewDeleteCommand() *cobra.Command {
all bool
allNamespaces bool
dryRun bool
+ force bool
)
command := &cobra.Command{
- Use: "delete [--dry-run] [WORKFLOW...|[--all] [--older] [--completed] [--resubmitted] [--prefix PREFIX] [--selector SELECTOR]]",
+ Use: "delete [--dry-run] [WORKFLOW...|[--all] [--older] [--completed] [--resubmitted] [--prefix PREFIX] [--selector SELECTOR] [--force] ]",
Short: "delete workflows",
Example: `# Delete a workflow:
@@ -64,7 +65,7 @@ func NewDeleteCommand() *cobra.Command {
for _, wf := range workflows {
if !dryRun {
- _, err := serviceClient.DeleteWorkflow(ctx, &workflowpkg.WorkflowDeleteRequest{Name: wf.Name, Namespace: wf.Namespace})
+ _, err := serviceClient.DeleteWorkflow(ctx, &workflowpkg.WorkflowDeleteRequest{Name: wf.Name, Namespace: wf.Namespace, Force: force})
if err != nil && status.Code(err) == codes.NotFound {
fmt.Printf("Workflow '%s' not found\n", wf.Name)
continue
@@ -85,7 +86,8 @@ func NewDeleteCommand() *cobra.Command {
command.Flags().StringVar(&flags.prefix, "prefix", "", "Delete workflows by prefix")
command.Flags().StringVar(&flags.finishedAfter, "older", "", "Delete completed workflows finished before the specified duration (e.g. 10m, 3h, 1d)")
command.Flags().StringVarP(&flags.labels, "selector", "l", "", "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
- command.Flags().StringVar(&flags.fields, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selectorkey1=value1,key2=value2). The server only supports a limited number of field queries per type.")
+ command.Flags().StringVar(&flags.fields, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
command.Flags().BoolVar(&dryRun, "dry-run", false, "Do not delete the workflow, only print what would happen")
+ command.Flags().BoolVar(&force, "force", false, "Force delete workflows by removing finalizers")
return command
}
diff --git a/cmd/argo/commands/list.go b/cmd/argo/commands/list.go
index 024ca4955979..ce524f3e4f18 100644
--- a/cmd/argo/commands/list.go
+++ b/cmd/argo/commands/list.go
@@ -90,7 +90,7 @@ func NewListCommand() *cobra.Command {
command.Flags().Int64VarP(&listArgs.chunkSize, "chunk-size", "", 0, "Return large lists in chunks rather than all at once. Pass 0 to disable.")
command.Flags().BoolVar(&listArgs.noHeaders, "no-headers", false, "Don't print headers (default print headers).")
command.Flags().StringVarP(&listArgs.labels, "selector", "l", "", "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
- command.Flags().StringVar(&listArgs.fields, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selectorkey1=value1,key2=value2). The server only supports a limited number of field queries per type.")
+ command.Flags().StringVar(&listArgs.fields, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
return command
}
diff --git a/cmd/argo/commands/resubmit.go b/cmd/argo/commands/resubmit.go
index 9aa1d645a374..d643dd2c8373 100644
--- a/cmd/argo/commands/resubmit.go
+++ b/cmd/argo/commands/resubmit.go
@@ -82,6 +82,7 @@ func NewResubmitCommand() *cobra.Command {
},
}
+ command.Flags().StringArrayVarP(&cliSubmitOpts.Parameters, "parameter", "p", []string{}, "input parameter to override on the original workflow spec")
command.Flags().Int32Var(&resubmitOpts.priority, "priority", 0, "workflow priority")
command.Flags().StringVarP(&cliSubmitOpts.Output, "output", "o", "", "Output format. One of: name|json|yaml|wide")
command.Flags().BoolVarP(&cliSubmitOpts.Wait, "wait", "w", false, "wait for the workflow to complete, only works when a single workflow is resubmitted")
@@ -130,9 +131,10 @@ func resubmitWorkflows(ctx context.Context, serviceClient workflowpkg.WorkflowSe
resubmittedNames[wf.Name] = true
lastResubmitted, err = serviceClient.ResubmitWorkflow(ctx, &workflowpkg.WorkflowResubmitRequest{
- Namespace: wf.Namespace,
- Name: wf.Name,
- Memoized: resubmitOpts.memoized,
+ Namespace: wf.Namespace,
+ Name: wf.Name,
+ Memoized: resubmitOpts.memoized,
+ Parameters: cliSubmitOpts.Parameters,
})
if err != nil {
return err
diff --git a/cmd/argo/commands/retry.go b/cmd/argo/commands/retry.go
index 5f2d2b01b6b0..32f05ab81d2a 100644
--- a/cmd/argo/commands/retry.go
+++ b/cmd/argo/commands/retry.go
@@ -85,6 +85,7 @@ func NewRetryCommand() *cobra.Command {
errors.CheckError(err)
},
}
+ command.Flags().StringArrayVarP(&cliSubmitOpts.Parameters, "parameter", "p", []string{}, "input parameter to override on the original workflow spec")
command.Flags().StringVarP(&cliSubmitOpts.Output, "output", "o", "", "Output format. One of: name|json|yaml|wide")
command.Flags().BoolVarP(&cliSubmitOpts.Wait, "wait", "w", false, "wait for the workflow to complete, only works when a single workflow is retried")
command.Flags().BoolVar(&cliSubmitOpts.Watch, "watch", false, "watch the workflow until it completes, only works when a single workflow is retried")
@@ -137,6 +138,7 @@ func retryWorkflows(ctx context.Context, serviceClient workflowpkg.WorkflowServi
Namespace: wf.Namespace,
RestartSuccessful: retryOpts.restartSuccessful,
NodeFieldSelector: selector.String(),
+ Parameters: cliSubmitOpts.Parameters,
})
if err != nil {
return err
diff --git a/cmd/argo/commands/root.go b/cmd/argo/commands/root.go
index 42ed3d1d248d..5df892b72488 100644
--- a/cmd/argo/commands/root.go
+++ b/cmd/argo/commands/root.go
@@ -103,6 +103,7 @@ If your server is behind an ingress with a path (you'll be running "argo server
command.AddCommand(auth.NewAuthCommand())
command.AddCommand(NewWaitCommand())
command.AddCommand(NewWatchCommand())
+ command.AddCommand(NewCpCommand())
command.AddCommand(NewStopCommand())
command.AddCommand(NewNodeCommand())
command.AddCommand(NewTerminateCommand())
diff --git a/cmd/argo/commands/server.go b/cmd/argo/commands/server.go
index b343ae36e252..bb91edc767d9 100644
--- a/cmd/argo/commands/server.go
+++ b/cmd/argo/commands/server.go
@@ -35,6 +35,7 @@ import (
"github.com/argoproj/argo-workflows/v3/util/help"
pprofutil "github.com/argoproj/argo-workflows/v3/util/pprof"
tlsutils "github.com/argoproj/argo-workflows/v3/util/tls"
+ "github.com/argoproj/argo-workflows/v3/workflow/common"
)
func NewServerCommand() *cobra.Command {
@@ -48,13 +49,14 @@ func NewServerCommand() *cobra.Command {
htst bool
namespaced bool // --namespaced
managedNamespace string // --managed-namespace
- ssoNamespace string
enableOpenBrowser bool
eventOperationQueueSize int
eventWorkerCount int
eventAsyncDispatch bool
frameOptions string
accessControlAllowOrigin string
+ apiRateLimit uint64
+ allowedLinkProtocol []string
logFormat string // --log-format
)
@@ -97,10 +99,16 @@ See %s`, help.ArgoServer),
managedNamespace = namespace
}
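+ // SSO RBAC is scoped to the managed namespace when one is set, otherwise to the installation namespace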
+ ssoNamespace := namespace
+ if managedNamespace != "" {
+ ssoNamespace = managedNamespace
+ }
+
log.WithFields(log.Fields{
"authModes": authModes,
"namespace": namespace,
"managedNamespace": managedNamespace,
+ "ssoNamespace": ssoNamespace,
"baseHRef": baseHRef,
"secure": secure,
}).Info()
@@ -142,39 +150,25 @@ See %s`, help.ArgoServer),
log.Warn("You are running without client authentication. Learn how to enable client authentication: https://argoproj.github.io/argo-workflows/argo-server-auth-mode/")
}
- if namespaced {
- // Case 1: If ssoNamespace is not specified, default it to installation namespace
- if ssoNamespace == "" {
- ssoNamespace = namespace
- }
- // Case 2: If ssoNamespace is not equal to installation or managed namespace, default it to installation namespace
- if ssoNamespace != namespace && ssoNamespace != managedNamespace {
- log.Warn("--sso-namespace should be equal to --managed-namespace or the installation namespace")
- ssoNamespace = namespace
- }
- } else {
- if ssoNamespace != "" {
- log.Warn("ignoring --sso-namespace because --namespaced is false")
- }
- ssoNamespace = namespace
- }
opts := apiserver.ArgoServerOpts{
BaseHRef: baseHRef,
TLSConfig: tlsConfig,
HSTS: htst,
Namespaced: namespaced,
Namespace: namespace,
- SSONameSpace: ssoNamespace,
Clients: clients,
RestConfig: config,
AuthModes: modes,
ManagedNamespace: managedNamespace,
+ SSONamespace: ssoNamespace,
ConfigName: configMap,
EventOperationQueueSize: eventOperationQueueSize,
EventWorkerCount: eventWorkerCount,
EventAsyncDispatch: eventAsyncDispatch,
XFrameOptions: frameOptions,
AccessControlAllowOrigin: accessControlAllowOrigin,
+ APIRateLimit: apiRateLimit,
+ AllowedLinkProtocol: allowedLinkProtocol,
}
browserOpenFunc := func(url string) {}
if enableOpenBrowser {
@@ -214,22 +208,28 @@ See %s`, help.ArgoServer),
defaultBaseHRef = "/"
}
+ defaultAllowedLinkProtocol := []string{"http", "https"}
+ if protocol := os.Getenv("ALLOWED_LINK_PROTOCOL"); protocol != "" {
+ defaultAllowedLinkProtocol = strings.Split(protocol, ",")
+ }
+
command.Flags().IntVarP(&port, "port", "p", 2746, "Port to listen on")
command.Flags().StringVar(&baseHRef, "basehref", defaultBaseHRef, "Value for base href in index.html. Used if the server is running behind reverse proxy under subpath different from /. Defaults to the environment variable BASE_HREF.")
// "-e" for encrypt, like zip
command.Flags().BoolVarP(&secure, "secure", "e", true, "Whether or not we should listen on TLS.")
command.Flags().BoolVar(&htst, "hsts", true, "Whether or not we should add a HTTP Secure Transport Security header. This only has effect if secure is enabled.")
command.Flags().StringArrayVar(&authModes, "auth-mode", []string{"client"}, "API server authentication mode. Any 1 or more length permutation of: client,server,sso")
- command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration")
+ command.Flags().StringVar(&configMap, "configmap", common.ConfigMapName, "Name of K8s configmap to retrieve workflow controller configuration")
command.Flags().BoolVar(&namespaced, "namespaced", false, "run as namespaced mode")
command.Flags().StringVar(&managedNamespace, "managed-namespace", "", "namespace that watches, default to the installation namespace")
- command.Flags().StringVar(&ssoNamespace, "sso-namespace", "", "namespace that will be used for SSO RBAC. Defaults to installation namespace. Used only in namespaced mode")
command.Flags().BoolVarP(&enableOpenBrowser, "browser", "b", false, "enable automatic launching of the browser [local mode]")
command.Flags().IntVar(&eventOperationQueueSize, "event-operation-queue-size", 16, "how many events operations that can be queued at once")
command.Flags().IntVar(&eventWorkerCount, "event-worker-count", 4, "how many event workers to run")
command.Flags().BoolVar(&eventAsyncDispatch, "event-async-dispatch", false, "dispatch event async")
command.Flags().StringVar(&frameOptions, "x-frame-options", "DENY", "Set X-Frame-Options header in HTTP responses.")
command.Flags().StringVar(&accessControlAllowOrigin, "access-control-allow-origin", "", "Set Access-Control-Allow-Origin header in HTTP responses.")
+ command.Flags().Uint64Var(&apiRateLimit, "api-rate-limit", 1000, "Set the per-IP rate limit for the API rate limiter")
+ command.Flags().StringArrayVar(&allowedLinkProtocol, "allowed-link-protocol", defaultAllowedLinkProtocol, "Allowed link protocols in the configMap. Used if the allowed configMap link protocols differ from http,https. Defaults to the ALLOWED_LINK_PROTOCOL environment variable.")
command.Flags().StringVar(&logFormat, "log-format", "text", "The formatter to use for logs. One of: text|json")
viper.AutomaticEnv()
diff --git a/cmd/argo/commands/terminate.go b/cmd/argo/commands/terminate.go
index f21ee0aa51a2..c20b75d374be 100644
--- a/cmd/argo/commands/terminate.go
+++ b/cmd/argo/commands/terminate.go
@@ -103,7 +103,7 @@ func NewTerminateCommand() *cobra.Command {
}
command.Flags().StringVarP(&t.labels, "selector", "l", "", "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
- command.Flags().StringVar(&t.fields, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selectorkey1=value1,key2=value2). The server only supports a limited number of field queries per type.")
+ command.Flags().StringVar(&t.fields, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
command.Flags().BoolVar(&t.dryRun, "dry-run", false, "Do not terminate the workflow, only print what would happen")
return command
}
diff --git a/cmd/argo/lint/formatter_pretty.go b/cmd/argo/lint/formatter_pretty.go
index 2b3ceacaec13..1d3807aed500 100644
--- a/cmd/argo/lint/formatter_pretty.go
+++ b/cmd/argo/lint/formatter_pretty.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
- "github.com/TwinProduction/go-color"
+ "github.com/TwiN/go-color"
)
const (
diff --git a/cmd/argo/lint/formatter_pretty_test.go b/cmd/argo/lint/formatter_pretty_test.go
index 848bb4120c81..310c79ba9d47 100644
--- a/cmd/argo/lint/formatter_pretty_test.go
+++ b/cmd/argo/lint/formatter_pretty_test.go
@@ -4,7 +4,7 @@ import (
"fmt"
"testing"
- "github.com/TwinProduction/go-color"
+ "github.com/TwiN/go-color"
"github.com/stretchr/testify/assert"
)
diff --git a/cmd/argoexec/commands/artifact/delete.go b/cmd/argoexec/commands/artifact/delete.go
new file mode 100644
index 000000000000..39ee4fd7decd
--- /dev/null
+++ b/cmd/argoexec/commands/artifact/delete.go
@@ -0,0 +1,131 @@
+package artifact
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+
+ "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+ workflow "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
+ wfv1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
+ executor "github.com/argoproj/argo-workflows/v3/workflow/artifacts"
+ "github.com/argoproj/argo-workflows/v3/workflow/common"
+)
+
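+// NewArtifactDeleteCommand returns the "artifact delete" command, which runs inside the artifact GC pod and deletes the artifacts listed in the WorkflowArtifactGCTasks labelled with that pod's name.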
+func NewArtifactDeleteCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "delete",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+
+ namespace := client.Namespace()
+ clientConfig := client.GetConfig()
+
+ if podName, ok := os.LookupEnv(common.EnvVarArtifactPodName); ok {
+
+ config, err := clientConfig.ClientConfig()
+ workflowInterface := workflow.NewForConfigOrDie(config)
+ if err != nil {
+ return err
+ }
+
+ artifactGCTaskInterface := workflowInterface.ArgoprojV1alpha1().WorkflowArtifactGCTasks(namespace)
+ labelSelector := fmt.Sprintf("%s = %s", common.LabelKeyArtifactGCPodName, podName)
+
+ err = deleteArtifacts(labelSelector, cmd.Context(), artifactGCTaskInterface)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ }
+}
+
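+// deleteArtifacts deletes every artifact referenced by the WorkflowArtifactGCTasks matching the label selector and patches each task's status with the per-artifact results.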
+func deleteArtifacts(labelSelector string, ctx context.Context, artifactGCTaskInterface wfv1alpha1.WorkflowArtifactGCTaskInterface) error {
+
+ taskList, err := artifactGCTaskInterface.List(context.Background(), metav1.ListOptions{LabelSelector: labelSelector})
+ if err != nil {
+ return err
+ }
+
+ for _, task := range taskList.Items {
+ task.Status.ArtifactResultsByNode = make(map[string]v1alpha1.ArtifactResultNodeStatus)
+ for nodeName, artifactNodeSpec := range task.Spec.ArtifactsByNode {
+
+ var archiveLocation *v1alpha1.ArtifactLocation
+ artResultNodeStatus := v1alpha1.ArtifactResultNodeStatus{ArtifactResults: make(map[string]v1alpha1.ArtifactResult)}
+ if artifactNodeSpec.ArchiveLocation != nil {
+ archiveLocation = artifactNodeSpec.ArchiveLocation
+ }
+
+ var resources resources
+ resources.Files = make(map[string][]byte) // same resources for every artifact
+ for _, artifact := range artifactNodeSpec.Artifacts {
+ if archiveLocation != nil {
+ err := artifact.Relocate(archiveLocation)
+ if err != nil {
+ return err
+ }
+ }
+
+ drv, err := executor.NewDriver(ctx, &artifact, resources)
+ if err != nil {
+ return err
+ }
+
+ err = drv.Delete(&artifact)
+ if err != nil {
+ errString := err.Error()
+ artResultNodeStatus.ArtifactResults[artifact.Name] = v1alpha1.ArtifactResult{Name: artifact.Name, Success: false, Error: &errString}
+ } else {
+ artResultNodeStatus.ArtifactResults[artifact.Name] = v1alpha1.ArtifactResult{Name: artifact.Name, Success: true, Error: nil}
+ }
+ }
+
+ task.Status.ArtifactResultsByNode[nodeName] = artResultNodeStatus
+ }
+ patch, err := json.Marshal(map[string]interface{}{"status": v1alpha1.ArtifactGCStatus{ArtifactResultsByNode: task.Status.ArtifactResultsByNode}})
+ if err != nil {
+ return err
+ }
+ _, err = artifactGCTaskInterface.Patch(context.Background(), task.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type resources struct {
+ Files map[string][]byte
+}
+
+func (r resources) GetSecret(ctx context.Context, name, key string) (string, error) {
+
+ path := filepath.Join(common.SecretVolMountPath, name, key)
+ if file, ok := r.Files[path]; ok {
+ return string(file), nil
+ }
+
+ file, err := os.ReadFile(path)
+ if err != nil {
+ return "", err
+ } else {
+ r.Files[path] = file
+ return string(file), err
+ }
+}
+
+func (r resources) GetConfigMapKey(ctx context.Context, name, key string) (string, error) {
+ return "", fmt.Errorf("not supported")
+}
diff --git a/cmd/argoexec/commands/artifact/root.go b/cmd/argoexec/commands/artifact/root.go
new file mode 100644
index 000000000000..898f412eaa4c
--- /dev/null
+++ b/cmd/argoexec/commands/artifact/root.go
@@ -0,0 +1,13 @@
+package artifact
+
+import (
+ "github.com/spf13/cobra"
+)
+
+func NewArtifactCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "artifact",
+ }
+ cmd.AddCommand(NewArtifactDeleteCommand())
+ return cmd
+}
diff --git a/cmd/argoexec/commands/emissary.go b/cmd/argoexec/commands/emissary.go
index b884749a21b1..4ad2caf015ef 100644
--- a/cmd/argoexec/commands/emissary.go
+++ b/cmd/argoexec/commands/emissary.go
@@ -16,6 +16,8 @@ import (
"syscall"
"time"
+ "github.com/argoproj/argo-workflows/v3/util/errors"
+
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/util/retry"
@@ -60,18 +62,6 @@ func NewEmissaryCommand() *cobra.Command {
name, args := args[0], args[1:]
- signals := make(chan os.Signal, 1)
- defer close(signals)
- signal.Notify(signals)
- defer signal.Reset()
- go func() {
- for s := range signals {
- if !osspecific.IsSIGCHLD(s) {
- _ = osspecific.Kill(-os.Getpid(), s.(syscall.Signal))
- }
- }
- }()
-
data, err := ioutil.ReadFile(varRunArgo + "/template")
if err != nil {
return fmt.Errorf("failed to read template: %w", err)
@@ -127,25 +117,28 @@ func NewEmissaryCommand() *cobra.Command {
return fmt.Errorf("failed to get retry strategy: %w", err)
}
- var command *exec.Cmd
- var stdout *os.File
- var combined *os.File
cmdErr := retry.OnError(backoff, func(error) bool { return true }, func() error {
- if stdout != nil {
- stdout.Close()
- }
- if combined != nil {
- combined.Close()
- }
- command, stdout, combined, err = createCommand(name, args, template)
+ command, stdout, combined, err := createCommand(name, args, template)
if err != nil {
return fmt.Errorf("failed to create command: %w", err)
}
-
+ defer stdout.Close()
+ defer combined.Close()
+ signals := make(chan os.Signal, 1)
+ defer close(signals)
+ signal.Notify(signals)
+ defer signal.Reset()
if err := command.Start(); err != nil {
return err
}
-
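+ // relay any signals received by the emissary (except SIGCHLD) to the sub-process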
+ go func() {
+ for s := range signals {
+ if !osspecific.IsSIGCHLD(s) {
+ _ = osspecific.Kill(command.Process.Pid, s.(syscall.Signal))
+ }
+ }
+ }()
+ pid := command.Process.Pid
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
@@ -158,16 +151,16 @@ func NewEmissaryCommand() *cobra.Command {
_ = os.Remove(varRunArgo + "/ctr/" + containerName + "/signal")
s, _ := strconv.Atoi(string(data))
if s > 0 {
- _ = osspecific.Kill(command.Process.Pid, syscall.Signal(s))
+ _ = osspecific.Kill(pid, syscall.Signal(s))
}
time.Sleep(2 * time.Second)
}
}
}()
- return command.Wait()
+ return osspecific.Wait(command.Process)
+
})
- defer stdout.Close()
- defer combined.Close()
+ logger.WithError(err).Info("sub-process exited")
if _, ok := os.LookupEnv("ARGO_DEBUG_PAUSE_AFTER"); ok {
for {
@@ -184,7 +177,7 @@ func NewEmissaryCommand() *cobra.Command {
if cmdErr == nil {
exitCode = 0
- } else if exitError, ok := cmdErr.(*exec.ExitError); ok {
+ } else if exitError, ok := cmdErr.(errors.Exited); ok {
if exitError.ExitCode() >= 0 {
exitCode = exitError.ExitCode()
} else {
diff --git a/cmd/argoexec/commands/emissary_test.go b/cmd/argoexec/commands/emissary_test.go
index 3338137b05d5..2c2d56306579 100644
--- a/cmd/argoexec/commands/emissary_test.go
+++ b/cmd/argoexec/commands/emissary_test.go
@@ -1,15 +1,15 @@
package commands
import (
+ "fmt"
"io/ioutil"
"os"
- "os/exec"
- "path/filepath"
"strconv"
"sync"
"syscall"
"testing"
- "time"
+
+ "github.com/argoproj/argo-workflows/v3/util/errors"
"github.com/stretchr/testify/assert"
)
@@ -20,16 +20,11 @@ func TestEmissary(t *testing.T) {
varRunArgo = tmp
includeScriptOutput = true
- wd, err := os.Getwd()
- assert.NoError(t, err)
-
- x := filepath.Join(wd, "../../../dist/argosay")
-
- err = ioutil.WriteFile(varRunArgo+"/template", []byte(`{}`), 0o600)
+ err := ioutil.WriteFile(varRunArgo+"/template", []byte(`{}`), 0o600)
assert.NoError(t, err)
t.Run("Exit0", func(t *testing.T) {
- err := run(x, []string{"exit"})
+ err := run("exit")
assert.NoError(t, err)
data, err := ioutil.ReadFile(varRunArgo + "/ctr/main/exitcode")
assert.NoError(t, err)
@@ -37,28 +32,28 @@ func TestEmissary(t *testing.T) {
})
t.Run("Exit1", func(t *testing.T) {
- err := run(x, []string{"exit", "1"})
- assert.Equal(t, 1, err.(*exec.ExitError).ExitCode())
+ err := run("exit 1")
+ assert.Equal(t, 1, err.(errors.Exited).ExitCode())
data, err := ioutil.ReadFile(varRunArgo + "/ctr/main/exitcode")
assert.NoError(t, err)
assert.Equal(t, "1", string(data))
})
t.Run("Stdout", func(t *testing.T) {
- err := run(x, []string{"echo", "hello", "/dev/stdout"})
+ err := run("echo hello")
assert.NoError(t, err)
data, err := ioutil.ReadFile(varRunArgo + "/ctr/main/stdout")
assert.NoError(t, err)
assert.Contains(t, string(data), "hello")
})
t.Run("Comined", func(t *testing.T) {
- err := run(x, []string{"echo", "hello", "/dev/stderr"})
+ err := run("echo hello > /dev/stderr")
assert.NoError(t, err)
data, err := ioutil.ReadFile(varRunArgo + "/ctr/main/combined")
assert.NoError(t, err)
assert.Contains(t, string(data), "hello")
})
t.Run("Signal", func(t *testing.T) {
- for signal, message := range map[syscall.Signal]string{
+ for signal := range map[syscall.Signal]string{
syscall.SIGTERM: "terminated",
syscall.SIGKILL: "killed",
} {
@@ -68,10 +63,10 @@ func TestEmissary(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
- err := run(x, []string{"sleep", "5s"})
- assert.EqualError(t, err, "signal: "+message)
+ err := run("sleep 3")
+ assert.EqualError(t, err, fmt.Sprintf("exit status %d", 128+signal))
}()
- time.Sleep(time.Second)
+ wg.Wait()
}
})
t.Run("Artifact", func(t *testing.T) {
@@ -85,7 +80,7 @@ func TestEmissary(t *testing.T) {
}
`), 0o600)
assert.NoError(t, err)
- err := run(x, []string{"echo", "hello", "/tmp/artifact"})
+ err := run("echo hello > /tmp/artifact")
assert.NoError(t, err)
data, err := ioutil.ReadFile(varRunArgo + "/outputs/artifacts/tmp/artifact.tgz")
assert.NoError(t, err)
@@ -102,7 +97,7 @@ func TestEmissary(t *testing.T) {
}
`), 0o600)
assert.NoError(t, err)
- err := run(x, []string{"echo", "hello", "/tmp/artifact"})
+ err := run("echo hello > /tmp/artifact")
assert.NoError(t, err)
data, err := ioutil.ReadFile(varRunArgo + "/outputs/artifacts/tmp/artifact.tgz")
assert.NoError(t, err)
@@ -121,7 +116,7 @@ func TestEmissary(t *testing.T) {
}
`), 0o600)
assert.NoError(t, err)
- err := run(x, []string{"echo", "hello", "/tmp/parameter"})
+ err := run("echo hello > /tmp/parameter")
assert.NoError(t, err)
data, err := ioutil.ReadFile(varRunArgo + "/outputs/parameters/tmp/parameter")
assert.NoError(t, err)
@@ -151,7 +146,7 @@ func TestEmissary(t *testing.T) {
`), 0o600)
assert.NoError(t, err)
_ = os.Remove("test.txt")
- err = run(x, []string{"sh", "./test/containerSetRetryTest.sh", "/tmp/artifact"})
+ err = run("sh ./test/containerSetRetryTest.sh /tmp/artifact")
assert.Error(t, err)
data, err := ioutil.ReadFile(varRunArgo + "/outputs/artifacts/tmp/artifact.tgz")
assert.NoError(t, err)
@@ -181,7 +176,7 @@ func TestEmissary(t *testing.T) {
`), 0o600)
assert.NoError(t, err)
_ = os.Remove("test.txt")
- err = run(x, []string{"sh", "./test/containerSetRetryTest.sh", "/tmp/artifact"})
+ err = run("sh ./test/containerSetRetryTest.sh /tmp/artifact")
assert.NoError(t, err)
data, err := ioutil.ReadFile(varRunArgo + "/outputs/artifacts/tmp/artifact.tgz")
assert.NoError(t, err)
@@ -189,8 +184,8 @@ func TestEmissary(t *testing.T) {
})
}
-func run(name string, args []string) error {
+func run(script string) error {
cmd := NewEmissaryCommand()
containerName = "main"
- return cmd.RunE(cmd, append([]string{name}, args...))
+ return cmd.RunE(cmd, append([]string{"sh", "-c"}, script))
}
diff --git a/cmd/argoexec/commands/kill.go b/cmd/argoexec/commands/kill.go
new file mode 100644
index 000000000000..05ebcadf5cde
--- /dev/null
+++ b/cmd/argoexec/commands/kill.go
@@ -0,0 +1,37 @@
+package commands
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "syscall"
+
+ "github.com/spf13/cobra"
+)
+
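+// NewKillCommand returns a command that sends a signal to a process, parsing the
+// signal number and PID from its arguments, e.g. `argoexec kill 15 1234` sends SIGTERM (15) to PID 1234.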
+func NewKillCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "kill SIGNAL PID",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ signum, err := strconv.Atoi(args[0])
+ if err != nil {
+ return err
+ }
+ pid, err := strconv.Atoi(args[1])
+ if err != nil {
+ return err
+ }
+ sig := syscall.Signal(signum)
+ p, err := os.FindProcess(pid)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("killing %d with %v\n", pid, sig)
+ if err := p.Signal(sig); err != nil {
+ return err
+ }
+ return nil
+ },
+ }
+}
diff --git a/cmd/argoexec/commands/resource.go b/cmd/argoexec/commands/resource.go
index fc3a6d84d3a8..94ca7686a945 100644
--- a/cmd/argoexec/commands/resource.go
+++ b/cmd/argoexec/commands/resource.go
@@ -45,8 +45,18 @@ func execResource(ctx context.Context, action string) error {
wfExecutor.AddError(err)
return err
}
+ manifestPath := common.ExecutorResourceManifestPath
+ if wfExecutor.Template.Resource.ManifestFrom != nil {
+ targetArtName := wfExecutor.Template.Resource.ManifestFrom.Artifact.Name
+ for _, art := range wfExecutor.Template.Inputs.Artifacts {
+ if art.Name == targetArtName {
+ manifestPath = art.Path
+ break
+ }
+ }
+ }
resourceNamespace, resourceName, selfLink, err := wfExecutor.ExecResource(
- action, common.ExecutorResourceManifestPath, wfExecutor.Template.Resource.Flags,
+ action, manifestPath, wfExecutor.Template.Resource.Flags,
)
if err != nil {
wfExecutor.AddError(err)
diff --git a/cmd/argoexec/commands/root.go b/cmd/argoexec/commands/root.go
index f492c2e6e384..c01aa58c02dc 100644
--- a/cmd/argoexec/commands/root.go
+++ b/cmd/argoexec/commands/root.go
@@ -16,6 +16,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-workflows/v3"
+ "github.com/argoproj/argo-workflows/v3/cmd/argoexec/commands/artifact"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
"github.com/argoproj/argo-workflows/v3/util"
@@ -23,10 +24,7 @@ import (
"github.com/argoproj/argo-workflows/v3/util/logs"
"github.com/argoproj/argo-workflows/v3/workflow/common"
"github.com/argoproj/argo-workflows/v3/workflow/executor"
- "github.com/argoproj/argo-workflows/v3/workflow/executor/docker"
"github.com/argoproj/argo-workflows/v3/workflow/executor/emissary"
- "github.com/argoproj/argo-workflows/v3/workflow/executor/kubelet"
- "github.com/argoproj/argo-workflows/v3/workflow/executor/pns"
)
const (
@@ -63,10 +61,12 @@ func NewRootCommand() *cobra.Command {
command.AddCommand(NewAgentCommand())
command.AddCommand(NewEmissaryCommand())
command.AddCommand(NewInitCommand())
+ command.AddCommand(NewKillCommand())
command.AddCommand(NewResourceCommand())
command.AddCommand(NewWaitCommand())
command.AddCommand(NewDataCommand())
command.AddCommand(cmd.NewVersionCmd(CLIName))
+ command.AddCommand(artifact.NewArtifactCommand())
clientConfig = kubecli.AddKubectlFlagsToCmd(&command)
command.PersistentFlags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
@@ -78,11 +78,10 @@ func NewRootCommand() *cobra.Command {
func initExecutor() *executor.WorkflowExecutor {
version := argo.GetVersion()
- executorType := os.Getenv(common.EnvVarContainerRuntimeExecutor)
- log.WithFields(log.Fields{"version": version.Version, "executorType": executorType}).Info("Starting Workflow Executor")
+ log.WithFields(log.Fields{"version": version.Version}).Info("Starting Workflow Executor")
config, err := clientConfig.ClientConfig()
checkErr(err)
- config = restclient.AddUserAgent(config, fmt.Sprintf("argo-workflows/%s argo-executor/%s", version.Version, executorType))
+ config = restclient.AddUserAgent(config, fmt.Sprintf("argo-workflows/%s argo-executor", version.Version))
logs.AddK8SLogTransportWrapper(config) // lets log all request as we should typically do < 5 per pod, so this is will show up problems
@@ -110,18 +109,7 @@ func initExecutor() *executor.WorkflowExecutor {
annotationPatchTickDuration, _ := time.ParseDuration(os.Getenv(common.EnvVarProgressPatchTickDuration))
progressFileTickDuration, _ := time.ParseDuration(os.Getenv(common.EnvVarProgressFileTickDuration))
- var cre executor.ContainerRuntimeExecutor
- log.Infof("Creating a %s executor", executorType)
- switch executorType {
- case common.ContainerRuntimeExecutorKubelet:
- cre, err = kubelet.NewKubeletExecutor(namespace, podName)
- case common.ContainerRuntimeExecutorPNS:
- cre, err = pns.NewPNSExecutor(clientset, podName, namespace)
- case common.ContainerRuntimeExecutorDocker:
- cre, err = docker.NewDockerExecutor(namespace, podName)
- default:
- cre, err = emissary.New()
- }
+ cre, err := emissary.New()
checkErr(err)
wfExecutor := executor.NewExecutor(
@@ -129,10 +117,10 @@ func initExecutor() *executor.WorkflowExecutor {
versioned.NewForConfigOrDie(config).ArgoprojV1alpha1().WorkflowTaskResults(namespace),
restClient,
podName,
+ types.UID(os.Getenv(common.EnvVarPodUID)),
os.Getenv(common.EnvVarWorkflowName),
os.Getenv(common.EnvVarNodeID),
namespace,
- types.UID(os.Getenv(common.EnvVarWorkflowUID)),
cre,
*tmpl,
includeScriptOutput,
diff --git a/cmd/argoexec/commands/wait.go b/cmd/argoexec/commands/wait.go
index 075c4a1ab2fc..c3b66e39e4fa 100644
--- a/cmd/argoexec/commands/wait.go
+++ b/cmd/argoexec/commands/wait.go
@@ -2,6 +2,8 @@ package commands
import (
"context"
+ "os/signal"
+ "syscall"
"time"
"github.com/argoproj/pkg/stats"
@@ -30,27 +32,24 @@ func waitContainer(ctx context.Context) error {
defer stats.LogStats()
stats.StartStatsTicker(5 * time.Minute)
- defer func() {
- if err := wfExecutor.KillSidecars(ctx); err != nil {
+ // use a block to constrain the scope of ctx
+ {
+ // this allows us to gracefully shutdown, capturing artifacts
+ ctx, cancel := signal.NotifyContext(ctx, syscall.SIGTERM)
+ defer cancel()
+
+ // Wait for main container to complete
+ err := wfExecutor.Wait(ctx)
+ if err != nil {
wfExecutor.AddError(err)
}
- }()
-
- // Wait for main container to complete
- err := wfExecutor.Wait(ctx)
- if err != nil {
- wfExecutor.AddError(err)
}
// Capture output script result
- err = wfExecutor.CaptureScriptResult(ctx)
- if err != nil {
- wfExecutor.AddError(err)
- }
- // Saving logs
- logArt, err := wfExecutor.SaveLogs(ctx)
+ err := wfExecutor.CaptureScriptResult(ctx)
if err != nil {
wfExecutor.AddError(err)
}
+
// Saving output parameters
err = wfExecutor.SaveParameters(ctx)
if err != nil {
@@ -61,11 +60,7 @@ func waitContainer(ctx context.Context) error {
if err != nil {
wfExecutor.AddError(err)
}
- // Annotating pod with output
- err = wfExecutor.ReportOutputs(ctx, logArt)
- if err != nil {
- wfExecutor.AddError(err)
- }
+ wfExecutor.SaveLogs(ctx)
return wfExecutor.HasError()
}
diff --git a/cmd/argoexec/main.go b/cmd/argoexec/main.go
index e13d6d4329eb..1bd6c581bc19 100644
--- a/cmd/argoexec/main.go
+++ b/cmd/argoexec/main.go
@@ -2,7 +2,8 @@ package main
import (
"os"
- "os/exec"
+
+ "github.com/argoproj/argo-workflows/v3/util/errors"
// load authentication plugin for obtaining credentials from cloud providers.
_ "k8s.io/client-go/plugin/pkg/client/auth"
@@ -14,7 +15,7 @@ import (
func main() {
err := commands.NewRootCommand().Execute()
if err != nil {
- if exitError, ok := err.(*exec.ExitError); ok {
+ if exitError, ok := err.(errors.Exited); ok {
if exitError.ExitCode() >= 0 {
os.Exit(exitError.ExitCode())
} else {
diff --git a/cmd/workflow-controller/main.go b/cmd/workflow-controller/main.go
index 357ffc21ca73..9b8a3964c53a 100644
--- a/cmd/workflow-controller/main.go
+++ b/cmd/workflow-controller/main.go
@@ -33,6 +33,7 @@ import (
"github.com/argoproj/argo-workflows/v3/util/env"
"github.com/argoproj/argo-workflows/v3/util/logs"
pprofutil "github.com/argoproj/argo-workflows/v3/util/pprof"
+ "github.com/argoproj/argo-workflows/v3/workflow/common"
"github.com/argoproj/argo-workflows/v3/workflow/controller"
"github.com/argoproj/argo-workflows/v3/workflow/events"
"github.com/argoproj/argo-workflows/v3/workflow/metrics"
@@ -109,7 +110,7 @@ func NewRootCommand() *cobra.Command {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- wfController, err := controller.NewWorkflowController(ctx, config, kubeclientset, wfclientset, namespace, managedNamespace, executorImage, executorImagePullPolicy, containerRuntimeExecutor, configMap, executorPlugins)
+ wfController, err := controller.NewWorkflowController(ctx, config, kubeclientset, wfclientset, namespace, managedNamespace, executorImage, executorImagePullPolicy, logFormat, containerRuntimeExecutor, configMap, executorPlugins)
errors.CheckError(err)
leaderElectionOff := os.Getenv("LEADER_ELECTION_DISABLE")
@@ -165,7 +166,7 @@ func NewRootCommand() *cobra.Command {
clientConfig = kubecli.AddKubectlFlagsToCmd(&command)
command.AddCommand(cmdutil.NewVersionCmd(CLIName))
- command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration")
+ command.Flags().StringVar(&configMap, "configmap", common.ConfigMapName, "Name of K8s configmap to retrieve workflow controller configuration")
command.Flags().StringVar(&executorImage, "executor-image", "", "Executor image to use (overrides value in configmap)")
command.Flags().StringVar(&executorImagePullPolicy, "executor-image-pull-policy", "", "Executor imagePullPolicy to use (overrides value in configmap)")
command.Flags().StringVar(&containerRuntimeExecutor, "container-runtime-executor", "", "Container runtime executor to use (overrides value in configmap)")
diff --git a/config/config.go b/config/config.go
index 7a714c8084c9..96f34ea584b6 100644
--- a/config/config.go
+++ b/config/config.go
@@ -3,6 +3,7 @@ package config
import (
"fmt"
"math"
+ "net/url"
"time"
apiv1 "k8s.io/api/core/v1"
@@ -12,8 +13,6 @@ import (
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)
-var EmptyConfigFunc = func() interface{} { return &Config{} }
-
type ResourceRateLimit struct {
Limit float64 `json:"limit"`
Burst int `json:"burst"`
@@ -34,17 +33,6 @@ type Config struct {
// KubeConfig specifies a kube config file for the wait & init containers
KubeConfig *KubeConfig `json:"kubeConfig,omitempty"`
- // ContainerRuntimeExecutor specifies the container runtime interface to use, default is emissary
- ContainerRuntimeExecutor string `json:"containerRuntimeExecutor,omitempty"`
-
- ContainerRuntimeExecutors ContainerRuntimeExecutors `json:"containerRuntimeExecutors,omitempty"`
-
- // KubeletPort is needed when using the kubelet containerRuntimeExecutor, default to 10250
- KubeletPort int `json:"kubeletPort,omitempty"`
-
- // KubeletInsecure disable the TLS verification of the kubelet containerRuntimeExecutor, default to false
- KubeletInsecure bool `json:"kubeletInsecure,omitempty"`
-
// ArtifactRepository contains the default location of an artifact repository for container artifacts
ArtifactRepository wfv1.ArtifactRepository `json:"artifactRepository,omitempty"`
@@ -83,9 +71,6 @@ type Config struct {
// Links to related apps.
Links []*wfv1.Link `json:"links,omitempty"`
- // Config customized Docker Sock path
- DockerSockPath string `json:"dockerSockPath,omitempty"`
-
// WorkflowDefaults are values that will apply to all Workflows from this controller, unless overridden on the Workflow-level
WorkflowDefaults *wfv1.Workflow `json:"workflowDefaults,omitempty"`
@@ -116,17 +101,9 @@ type Config struct {
// NavColor is an ui navigation bar background color
NavColor string `json:"navColor,omitempty"`
-}
-func (c Config) GetContainerRuntimeExecutor(labels labels.Labels) (string, error) {
- name, err := c.ContainerRuntimeExecutors.Select(labels)
- if err != nil {
- return "", err
- }
- if name != "" {
- return name, nil
- }
- return c.ContainerRuntimeExecutor, nil
+ // SSO in settings for single-sign on
+ SSO SSOConfig `json:"sso,omitempty"`
}
func (c Config) GetExecutor() *apiv1.Container {
@@ -154,6 +131,32 @@ func (c Config) GetPodGCDeleteDelayDuration() time.Duration {
return c.PodGCDeleteDelayDuration.Duration
}
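+// ValidateProtocol returns nil if inputProtocol is one of allowedProtocol, otherwise an error.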
+func (c Config) ValidateProtocol(inputProtocol string, allowedProtocol []string) error {
+ for _, protocol := range allowedProtocol {
+ if inputProtocol == protocol {
+ return nil
+ }
+ }
+ return fmt.Errorf("protocol %s is not allowed", inputProtocol)
+}
+
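+// Sanitize verifies that each configured link URL parses and uses one of the allowed
+// protocols (e.g. http, https), and re-assembles it into a normalized URL string.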
+func (c *Config) Sanitize(allowedProtocol []string) error {
+ links := c.Links
+
+ for _, link := range links {
+ u, err := url.Parse(link.URL)
+ if err != nil {
+ return err
+ }
+ err = c.ValidateProtocol(u.Scheme, allowedProtocol)
+ if err != nil {
+ return err
+ }
+ link.URL = u.String() // reassembles the URL into a valid URL string
+ }
+ return nil
+}
+
// PodSpecLogStrategy contains the configuration for logging the pod spec in controller log for debugging purpose
type PodSpecLogStrategy struct {
FailedPod bool `json:"failedPod,omitempty"`
diff --git a/config/config_test.go b/config/config_test.go
index b8198a90876a..09faa3b7ccb8 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -4,8 +4,8 @@ import (
"testing"
"github.com/stretchr/testify/assert"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
+
+ wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)
func TestDatabaseConfig(t *testing.T) {
@@ -13,30 +13,21 @@ func TestDatabaseConfig(t *testing.T) {
assert.Equal(t, "my-host:1234", DatabaseConfig{Host: "my-host", Port: 1234}.GetHostname())
}
-func TestContainerRuntimeExecutor(t *testing.T) {
- t.Run("Default", func(t *testing.T) {
- c := Config{ContainerRuntimeExecutor: "foo"}
- executor, err := c.GetContainerRuntimeExecutor(labels.Set{})
- assert.NoError(t, err)
- assert.Equal(t, "foo", executor)
- })
- t.Run("Error", func(t *testing.T) {
- c := Config{ContainerRuntimeExecutor: "foo", ContainerRuntimeExecutors: ContainerRuntimeExecutors{
- {Name: "bar", Selector: metav1.LabelSelector{
- MatchLabels: map[string]string{"!": "!"},
- }},
- }}
- _, err := c.GetContainerRuntimeExecutor(labels.Set{})
- assert.Error(t, err)
- })
- t.Run("NoError", func(t *testing.T) {
- c := Config{ContainerRuntimeExecutor: "foo", ContainerRuntimeExecutors: ContainerRuntimeExecutors{
- {Name: "bar", Selector: metav1.LabelSelector{
- MatchLabels: map[string]string{"baz": "qux"},
- }},
- }}
- executor, err := c.GetContainerRuntimeExecutor(labels.Set(map[string]string{"baz": "qux"}))
- assert.NoError(t, err)
- assert.Equal(t, "bar", executor)
- })
+func TestSanitize(t *testing.T) {
+ tests := []struct {
+ c Config
+ err string
+ }{
+ {Config{Links: []*wfv1.Link{{URL: "javascript:foo"}}}, "protocol javascript is not allowed"},
+ {Config{Links: []*wfv1.Link{{URL: "javASCRipt: //foo"}}}, "protocol javascript is not allowed"},
+ {Config{Links: []*wfv1.Link{{URL: "http://foo.bar/?foo=bar"}}}, ""},
+ }
+ for _, tt := range tests {
+ err := tt.c.Sanitize([]string{"http", "https"})
+ if tt.err != "" {
+ assert.Equal(t, err.Error(), tt.err)
+ } else {
+ assert.Nil(t, err)
+ }
+ }
}
diff --git a/config/controller.go b/config/controller.go
index 01ff517589ad..66b916e55c73 100644
--- a/config/controller.go
+++ b/config/controller.go
@@ -5,60 +5,37 @@ import (
"fmt"
"strings"
- log "github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
- apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/apimachinery/pkg/runtime"
- runtimeutil "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
- "k8s.io/client-go/tools/cache"
"sigs.k8s.io/yaml"
)
type Controller interface {
- Run(stopCh <-chan struct{}, onChange func(config interface{}) error)
- Get(context.Context) (interface{}, error)
+ Get(context.Context) (*Config, error)
}
type controller struct {
namespace string
// name of the config map
- configMap string
- kubeclientset kubernetes.Interface
- emptyConfigFunc func() interface{} // must return a pointer, non-nil
+ configMap string
+ kubeclientset kubernetes.Interface
}
-func NewController(namespace, name string, kubeclientset kubernetes.Interface, emptyConfigFunc func() interface{}) Controller {
- log.WithField("name", name).Info("config map")
+func NewController(namespace, name string, kubeclientset kubernetes.Interface) Controller {
return &controller{
- namespace: namespace,
- configMap: name,
- kubeclientset: kubeclientset,
- emptyConfigFunc: emptyConfigFunc,
+ namespace: namespace,
+ configMap: name,
+ kubeclientset: kubeclientset,
}
}
-func (cc *controller) updateConfig(cm *apiv1.ConfigMap, onChange func(config interface{}) error) error {
- config, err := cc.parseConfigMap(cm)
- if err != nil {
- return err
- }
- return onChange(config)
-}
-
-func (cc *controller) parseConfigMap(cm *apiv1.ConfigMap) (interface{}, error) {
- config := cc.emptyConfigFunc()
- if cm == nil {
- return config, nil
- }
+func parseConfigMap(cm *apiv1.ConfigMap, config *Config) error {
// The key in the configmap to retrieve workflow configuration from.
// Content encoding is expected to be YAML.
rawConfig, ok := cm.Data["config"]
if ok && len(cm.Data) != 1 {
- return config, fmt.Errorf("if you have an item in your config map named 'config', you must only have one item")
+ return fmt.Errorf("if you have an item in your config map named 'config', you must only have one item")
}
if !ok {
for name, value := range cm.Data {
@@ -70,64 +47,16 @@ func (cc *controller) parseConfigMap(cm *apiv1.ConfigMap) (interface{}, error) {
}
}
}
- err := yaml.Unmarshal([]byte(rawConfig), config)
- return config, err
+ err := yaml.UnmarshalStrict([]byte(rawConfig), config)
+ return err
}
-func (cc *controller) Run(stopCh <-chan struct{}, onChange func(config interface{}) error) {
- defer runtimeutil.HandleCrash(runtimeutil.PanicHandlers...)
-
- restClient := cc.kubeclientset.CoreV1().RESTClient()
- resource := "configmaps"
- fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", cc.configMap))
- ctx := context.Background()
- listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
- options.FieldSelector = fieldSelector.String()
- req := restClient.Get().
- Namespace(cc.namespace).
- Resource(resource).
- VersionedParams(&options, metav1.ParameterCodec)
- return req.Do(ctx).Get()
- }
- watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
- options.Watch = true
- options.FieldSelector = fieldSelector.String()
- req := restClient.Get().
- Namespace(cc.namespace).
- Resource(resource).
- VersionedParams(&options, metav1.ParameterCodec)
- return req.Watch(ctx)
- }
- source := &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
- _, controller := cache.NewInformer(
- source,
- &apiv1.ConfigMap{},
- 0,
- cache.ResourceEventHandlerFuncs{
- UpdateFunc: func(old, new interface{}) {
- oldCM := old.(*apiv1.ConfigMap)
- newCM := new.(*apiv1.ConfigMap)
- if oldCM.ResourceVersion == newCM.ResourceVersion {
- return
- }
- if newCm, ok := new.(*apiv1.ConfigMap); ok {
- log.Infof("Detected ConfigMap update.")
- err := cc.updateConfig(newCm, onChange)
- if err != nil {
- log.Errorf("Update of config failed due to: %v", err)
- }
- }
- },
- })
- controller.Run(stopCh)
- log.Info("Watching config map updates")
-}
-
-func (cc *controller) Get(ctx context.Context) (interface{}, error) {
+func (cc *controller) Get(ctx context.Context) (*Config, error) {
+ config := &Config{}
cmClient := cc.kubeclientset.CoreV1().ConfigMaps(cc.namespace)
cm, err := cmClient.Get(ctx, cc.configMap, metav1.GetOptions{})
- if err != nil && !apierr.IsNotFound(err) {
- return cc.emptyConfigFunc(), err
+ if err != nil {
+ return nil, err
}
- return cc.parseConfigMap(cm)
+ return config, parseConfigMap(cm, config)
}
diff --git a/config/controller_test.go b/config/controller_test.go
index 7170760392db..bbad50ec3306 100644
--- a/config/controller_test.go
+++ b/config/controller_test.go
@@ -1,28 +1,21 @@
package config
import (
- "context"
"testing"
"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
- "k8s.io/client-go/kubernetes/fake"
)
func Test_parseConfigMap(t *testing.T) {
- cc := controller{emptyConfigFunc: EmptyConfigFunc}
t.Run("Empty", func(t *testing.T) {
- _, err := cc.parseConfigMap(&apiv1.ConfigMap{})
+ c := &Config{}
+ err := parseConfigMap(&apiv1.ConfigMap{}, c)
assert.NoError(t, err)
})
- t.Run("Config", func(t *testing.T) {
- c, err := cc.parseConfigMap(&apiv1.ConfigMap{Data: map[string]string{"config": "containerRuntimeExecutor: pns"}})
- if assert.NoError(t, err) {
- assert.Equal(t, "pns", c.(*Config).ContainerRuntimeExecutor)
- }
- })
t.Run("Complex", func(t *testing.T) {
- c, err := cc.parseConfigMap(&apiv1.ConfigMap{Data: map[string]string{"containerRuntimeExecutor": "pns", "artifactRepository": ` archiveLogs: true
+ c := &Config{}
+ err := parseConfigMap(&apiv1.ConfigMap{Data: map[string]string{"artifactRepository": ` archiveLogs: true
s3:
bucket: my-bucket
endpoint: minio:9000
@@ -32,24 +25,14 @@ func Test_parseConfigMap(t *testing.T) {
key: accesskey
secretKeySecret:
name: my-minio-cred
- key: secretkey`}})
+ key: secretkey`}}, c)
if assert.NoError(t, err) {
- assert.Equal(t, "pns", c.(*Config).ContainerRuntimeExecutor)
- assert.NotEmpty(t, c.(*Config).ArtifactRepository)
+ assert.NotEmpty(t, c.ArtifactRepository)
}
})
- t.Run("IgnoreGarbage", func(t *testing.T) {
- _, err := cc.parseConfigMap(&apiv1.ConfigMap{Data: map[string]string{"garbage": "garbage"}})
- assert.NoError(t, err)
+ t.Run("Garbage", func(t *testing.T) {
+ c := &Config{}
+ err := parseConfigMap(&apiv1.ConfigMap{Data: map[string]string{"garbage": "garbage"}}, c)
+ assert.Error(t, err)
})
}
-
-func Test_controller_Get(t *testing.T) {
- kube := fake.NewSimpleClientset()
- c := controller{configMap: "my-config-map", kubeclientset: kube, emptyConfigFunc: EmptyConfigFunc}
- ctx := context.Background()
- config, err := c.Get(ctx)
- if assert.NoError(t, err) {
- assert.Empty(t, config)
- }
-}
diff --git a/config/image.go b/config/image.go
index c99d8d2021f1..be061068da4e 100644
--- a/config/image.go
+++ b/config/image.go
@@ -1,6 +1,6 @@
package config
type Image struct {
- Command []string `json:"command"`
- Args []string `json:"args,omitempty"`
+ Entrypoint []string `json:"entrypoint,omitempty"`
+ Cmd []string `json:"cmd,omitempty"`
}
diff --git a/config/rbac.go b/config/rbac.go
new file mode 100644
index 000000000000..8cdf3e8d3250
--- /dev/null
+++ b/config/rbac.go
@@ -0,0 +1,9 @@
+package config
+
+type RBACConfig struct {
+ Enabled bool `json:"enabled,omitempty"`
+}
+
+func (c *RBACConfig) IsEnabled() bool {
+ return c != nil && c.Enabled
+}
diff --git a/config/sso.go b/config/sso.go
new file mode 100644
index 000000000000..4c1a18254e2f
--- /dev/null
+++ b/config/sso.go
@@ -0,0 +1,31 @@
+package config
+
+import (
+ "time"
+
+ apiv1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type SSOConfig struct {
+ Issuer string `json:"issuer"`
+ IssuerAlias string `json:"issuerAlias,omitempty"`
+ ClientID apiv1.SecretKeySelector `json:"clientId"`
+ ClientSecret apiv1.SecretKeySelector `json:"clientSecret"`
+ RedirectURL string `json:"redirectUrl"`
+ RBAC *RBACConfig `json:"rbac,omitempty"`
+ // additional scopes (on top of "openid")
+ Scopes []string `json:"scopes,omitempty"`
+ SessionExpiry metav1.Duration `json:"sessionExpiry,omitempty"`
+ // customGroupClaimName will override the groups claim name
+ CustomGroupClaimName string `json:"customGroupClaimName,omitempty"`
+ UserInfoPath string `json:"userInfoPath,omitempty"`
+ InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
+}
+
+func (c SSOConfig) GetSessionExpiry() time.Duration {
+ if c.SessionExpiry.Duration > 0 {
+ return c.SessionExpiry.Duration
+ }
+ return 10 * time.Hour
+}
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
index 0b7d8a7adc40..aec853da5e50 100644
--- a/docs/CONTRIBUTING.md
+++ b/docs/CONTRIBUTING.md
@@ -10,8 +10,8 @@ See [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-o
## Contributor Meetings
-A weekly opportunity for committers and maintainers of Workflows, Events, and Dataflow to discuss their current work and talk
-about what’s next. Feel free to join us! For Contributor Meeting information, minutes and recordings
+A weekly opportunity for committers and maintainers of Workflows, Events, and Dataflow to discuss their current work and
+talk about what’s next. Feel free to join us! For Contributor Meeting information, minutes and recordings
please [see here](https://bit.ly/argo-data-weekly).
## How To Contribute
@@ -19,7 +19,9 @@ please [see here](https://bit.ly/argo-data-weekly).
We're always looking for contributors.
* Documentation - something missing or unclear? Please submit a pull request!
-* Code contribution - investigate a [help wanted issue](https://github.com/argoproj/argo-workflows/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22+label%3A%22good+first+issue%22), or anything labelled with "good first issue"?
+* Code contribution - investigate a [good first issue](https://github.com/argoproj/argo-workflows/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22), or anything not assigned.
* Join the `#argo-contributors` channel on [our Slack](https://argoproj.github.io/community/join-slack).
* Get a [mentor](mentoring.md) to help you get started.
@@ -27,9 +29,34 @@ We're always looking for contributors.
To run Argo Workflows locally for development: [running locally](running-locally.md).
+### Dependencies
+
+Dependencies increase the risk of security issues and have on-going maintenance costs.
+
+The dependency must pass these tests:
+
+* A strong use case.
+* It has an acceptable license (e.g. MIT).
+* It is actively maintained.
+* It has no security issues.
+
+For example, should we add `fasttemplate`? [View the Snyk report](https://snyk.io/advisor/golang/github.com/valyala/fasttemplate):
+
+| Test | Outcome |
+|-----------------------------------------|-------------------------------------|
+| A strong use case. | ❌ Fail. We can use `text/template`. |
+| It has an acceptable license (e.g. MIT) | ✅ Pass. MIT license. |
+| It is actively maintained. | ❌ Fail. Project is inactive. |
+| It has no security issues. | ✅ Pass. No known security issues. |
+
+No, we should not add that dependency.
+
### Test Policy
-Changes without either unit or e2e tests are unlikely to be accepted. See [the pull request template](https://github.com/argoproj/argo-workflows/blob/master/.github/pull_request_template.md).
+Changes without either unit or e2e tests are unlikely to be accepted.
+See [the pull request template](https://github.com/argoproj/argo-workflows/blob/master/.github/pull_request_template.md).
### Contributor Workshop
diff --git a/docs/README.md b/docs/README.md
index 78292bd060ce..2291bfe1c8a0 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,37 +1,17 @@
-
+# Argo Workflows
-[](https://github.com/argoproj/argo-workflows/actions?query=event%3Apush+branch%3Amaster)
-[](https://bestpractices.coreinfrastructure.org/projects/3830)
-[](https://argoproj.github.io/community/join-slack)
-
-## What is Argo Workflows?
-Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
+Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo
+Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
* Define workflows where each step in the workflow is a container.
-* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic graph (DAG).
-* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo Workflows on Kubernetes.
+* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic
+ graph (DAG).
+* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo
+ Workflows on Kubernetes.
* Run CI/CD pipelines natively on Kubernetes without configuring complex software development products.
-Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) hosted project.
-
[](https://www.youtube.com/watch?v=TZgLkCFQ2tk)
-## Why Argo Workflows?
-* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments.
-* Cloud agnostic and can run on any Kubernetes cluster.
-* Easily orchestrate highly parallel jobs on Kubernetes.
-* Argo Workflows puts a cloud-scale supercomputer at your fingertips!
-
-# Argo Documentation
-
-### Getting Started
-
-For set-up information and running your first Workflows, please see our [Getting Started](quick-start.md) guide.
-
-### Examples
-
-For detailed examples about what Argo can do, please see our [documentation by example](https://github.com/argoproj/argo-workflows/blob/master/examples/README.md) page.
-
-### Fields
+## Getting Started
-For a full list of all the fields available in for use in Argo, and a link to examples where each is used, please see [Argo Fields](fields.md).
+For set-up information and running your first Workflows, please see our [getting started guide](quick-start.md).
diff --git a/docs/access-token.md b/docs/access-token.md
index 2aa35e679f12..63ecb459d823 100644
--- a/docs/access-token.md
+++ b/docs/access-token.md
@@ -1,37 +1,41 @@
# Access Token
## Overview
-If you want to automate tasks with the Argo Server API or CLI, you will need an access token.
-## Pre-requisites
+If you want to automate tasks with the Argo Server API or CLI, you will need an access token.
+
+## Prerequisites
+
Firstly, create a role with minimal permissions. This example role for jenkins grants only permission to update and list workflows:
-```sh
+```bash
kubectl create role jenkins --verb=list,update --resource=workflows.argoproj.io
```
Create a service account for your service:
-```sh
+```bash
kubectl create sa jenkins
```
### Tip for Tokens Creation
-Create a unique service account for each client:
+
+Create a unique service account for each client:
- (a) you'll be able to correctly secure your workflows
-- (b) [revoke the token](#token-revocation) without impacting other clients.
+- (b) [revoke the token](#token-revocation) without impacting other clients.
Bind the service account to the role (in this case in the `argo` namespace):
-```sh
+```bash
kubectl create rolebinding jenkins --role=jenkins --serviceaccount=argo:jenkins
```
## Token Creation
+
You now need to get a token:
-```sh
+```bash
SECRET=$(kubectl get sa jenkins -o=jsonpath='{.secrets[0].name}')
ARGO_TOKEN="Bearer $(kubectl get secret $SECRET -o=jsonpath='{.data.token}' | base64 --decode)"
echo $ARGO_TOKEN
@@ -39,18 +43,19 @@ Bearer ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNkltS...
```
## Token Usage & Test
+
To use that token with the CLI you need to set `ARGO_SERVER` (see `argo --help`).
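+
+For example, a minimal sketch (assuming the `argo` CLI is installed and `ARGO_TOKEN` was created as above):
+
+```bash
+export ARGO_SERVER=localhost:2746   # host:port of your Argo Server
+export ARGO_TOKEN                   # Bearer token created in the previous step
+argo list -n argo                   # the jenkins role above permits listing workflows
+```
+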
Use that token in your API requests, e.g. to list workflows:
-```sh
+```bash
curl https://localhost:2746/api/v1/workflows/argo -H "Authorization: $ARGO_TOKEN"
# 200 OK
```
You should check you cannot do things you're not allowed!
-```sh
+```bash
curl https://localhost:2746/api/v1/workflow-templates/argo -H "Authorization: $ARGO_TOKEN"
# 403 error
```
@@ -59,29 +64,33 @@ curl https://localhost:2746/api/v1/workflow-templates/argo -H "Authorization: $A
### Set additional params to initialize Argo settings
- ARGO_SERVER="${{HOST}}:443"
- KUBECONFIG=/dev/null
- ARGO_NAMESPACE=sandbox
+```bash
+ARGO_SERVER="${{HOST}}:443"
+KUBECONFIG=/dev/null
+ARGO_NAMESPACE=sandbox
+```
### Start container with settings above
+
> Note: Example for getting list of templates from an existing namespace
- docker run --rm -it \
- -e ARGO_SERVER=$ARGO_SERVER \
- -e ARGO_TOKEN=$ARGO_TOKEN \
- -e ARGO_HTTP=false \
- -e ARGO_HTTP1=true \
- -e KUBECONFIG=/dev/null \
- -e ARGO_NAMESPACE=$ARGO_NAMESPACE \
- argoproj/argocli:latest template list -v -e -k
+```bash
+docker run --rm -it \
+ -e ARGO_SERVER=$ARGO_SERVER \
+ -e ARGO_TOKEN=$ARGO_TOKEN \
+ -e ARGO_HTTP=false \
+ -e ARGO_HTTP1=true \
+ -e KUBECONFIG=/dev/null \
+ -e ARGO_NAMESPACE=$ARGO_NAMESPACE \
+ argoproj/argocli:latest template list -v -e -k
+```
## Token Revocation
Token compromised?
-```sh
+```bash
kubectl delete secret $SECRET
```
A new one will be created.
-
diff --git a/docs/architecture.md b/docs/architecture.md
index 78c6eb46c1ce..43ca2dc9ae15 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -12,3 +12,12 @@

+## Various configurations for Argo UI and Argo Server
+
+The top diagram below shows what happens if you run `make start UI=true` locally (recommended if you need the UI during local development). This runs a React application (`Webpack` HTTP server) locally which serves the `index.html` and TypeScript files from port 8080. From the TypeScript code there are calls made to the back-end API (Argo Server) at port 2746. The `Webpack` HTTP server is configured for hot reload, meaning the UI will update automatically based on local code changes.
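+
+A rough sketch of that local setup (ports as described above):
+
+```bash
+make start UI=true          # Argo Server API on :2746, Webpack dev server on :8080
+# then browse http://localhost:8080 - UI changes hot-reload, API calls go to :2746
+```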
+
+The second diagram is an alternative approach for rare occasions that the React files are broken and you're doing local development. In this case, everything is served from the Argo Server at port 2746.
+
+The third diagram shows how things are configured for a Kubernetes environment. It is similar to the second diagram in that the Argo Server hosts everything for the UI.
+
+
diff --git a/docs/argo-server-auth-mode.md b/docs/argo-server-auth-mode.md
index 4ee302c2cb32..e748f53adc3f 100644
--- a/docs/argo-server-auth-mode.md
+++ b/docs/argo-server-auth-mode.md
@@ -2,13 +2,14 @@
You can choose which kube config the Argo Server uses:
-* "server" - in hosted mode, use the kube config of service account, in local mode, use your local kube config.
-* "client" - requires clients to provide their Kubernetes bearer token and use that.
-* ["sso"](./argo-server-sso.md) - since v2.9, use single sign-on, this will use the same service account as per "server" for RBAC. We expect to change this in the future so that the OAuth claims are mapped to service accounts.
+* `server` - in hosted mode, use the kube config of service account, in local mode, use your local kube config.
+* `client` - requires clients to provide their Kubernetes bearer token and use that.
+* [`sso`](./argo-server-sso.md) - since v2.9, use single sign-on, this will use the same service account as per "server" for RBAC. We expect to change this in the future so that the OAuth claims are mapped to service accounts.
The server used to start with auth mode of "server" by default, but since v3.0 it defaults to the "client".
To change the server auth mode specify the list as multiple auth-mode flags:
-```
+
+```bash
argo server --auth-mode sso --auth-mode ...
```
diff --git a/docs/argo-server-sso-argocd.md b/docs/argo-server-sso-argocd.md
index 0983b7293307..0abcaeb70e1e 100644
--- a/docs/argo-server-sso-argocd.md
+++ b/docs/argo-server-sso-argocd.md
@@ -1,9 +1,10 @@
-# Use ArgoCD Dex for authentication
+# Use Argo CD Dex for authentication
It is possible to have the Argo Workflows Server use the Argo CD Dex instance for authentication, for instance if you use Okta with SAML which cannot integrate with Argo Workflows directly. In order to make this happen, you will need the following:
-- You must be using at least Dex [v2.23.0](https://github.com/dexidp/dex/releases/tag/v2.23.0), because that's when `staticClients[].secretEnv` was added. That means ArgoCD 1.7.12 and above.
-- A secret containing two keys, `client-id` and `client-secret` to be used by both Dex and Argo Workflows Server. `client-id` is `argo-workflows-sso` in this example, `client-secret` can be any random string. If ArgoCD and ArgoWorkflows are installed in different namespaces the secret must be present in both of them. Example:
+- You must be using at least Dex [v2.23.0](https://github.com/dexidp/dex/releases/tag/v2.23.0), because that's when `staticClients[].secretEnv` was added. That means Argo CD 1.7.12 and above.
+- A secret containing two keys, `client-id` and `client-secret` to be used by both Dex and Argo Workflows Server. `client-id` is `argo-workflows-sso` in this example, `client-secret` can be any random string. If Argo CD and Argo Workflows are installed in different namespaces the secret must be present in both of them. Example:
+
```yaml
apiVersion: v1
kind: Secret
@@ -15,13 +16,15 @@ It is possible to have the Argo Workflows Server use the Argo CD Dex instance fo
# client-secret is 'MY-SECRET-STRING-CAN-BE-UUID'
client-secret: TVktU0VDUkVULVNUUklORy1DQU4tQkUtVVVJRA==
```
+
- `--auth-mode=sso` server argument added
- A Dex `staticClients` configured for `argo-workflows-sso`
- The `sso` configuration filled out in Argo Workflows Server to match
-## Example manifests for authenticating against ArgoCD's Dex (Kustomize)
+## Example manifests for authenticating against Argo CD's Dex (Kustomize)
+
+In Argo CD, add an environment variable to Dex deployment and configuration:
-In ArgoCD, add an environment variable to Dex deployment and configuration:
```yaml
---
apiVersion: apps/v1
@@ -49,7 +52,7 @@ data:
# Dex settings, but instead it will replace the entire configuration with the settings below,
# so add these to the existing config instead of setting them in a separate file
dex.config: |
- # Setting staticClients allows ArgoWorkflows to use ArgoCD's Dex installation for authentication
+ # Setting staticClients allows Argo Workflows to use Argo CD's Dex installation for authentication
staticClients:
- id: argo-workflows-sso
name: Argo Workflow
@@ -59,6 +62,7 @@ data:
```
In Argo Workflows add `--auth-mode=sso` argument to argo-server deployment.
+
```yaml
---
apiVersion: apps/v1
@@ -101,9 +105,10 @@ data:
redirectUrl: https://argo-workflows.mydomain.com/oauth2/callback
```
-## Example Helm chart configuration for authenticating against ArgoCD's Dex
+## Example Helm chart configuration for authenticating against Argo CD's Dex
`argo-cd/values.yaml`:
+
```yaml
dex:
image:
@@ -125,7 +130,8 @@ data:
secretEnv: ARGO_WORKFLOWS_SSO_CLIENT_SECRET
```
-`argo/values.yaml`:
+`argo-workflows/values.yaml`:
+
```yaml
server:
extraArgs:
diff --git a/docs/argo-server-sso.md b/docs/argo-server-sso.md
index 9aea26c3eb1a..8d47a1afbce6 100644
--- a/docs/argo-server-sso.md
+++ b/docs/argo-server-sso.md
@@ -1,17 +1,16 @@
# Argo Server SSO
-
-
> v2.9 and after
-It is possible to use [Dex](https://github.com/dexidp/dex) for authentication. [This document](argo-server-sso-argocd.md) describes how to set up ArgoWorkflows and ArgoCD so that ArgoWorkflows uses ArgoCD's Dex server for authentication.
+It is possible to use [Dex](https://github.com/dexidp/dex) for authentication. [This document](argo-server-sso-argocd.md) describes how to set up Argo Workflows and Argo CD so that Argo Workflows uses Argo CD's Dex server for authentication.
-## To start Argo Server with SSO.
+## To start Argo Server with SSO
-Firstly, configure the settings [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml) with the correct OAuth 2 values. If working towards an oidc configuration the ArgoCD project has [guides](https://argoproj.github.io/argo-cd/operator-manual/user-management/#existing-oidc-provider) on its similar (though different) process for setting up oidc providers. It also includes examples for specific providers. The main difference is that the ArgoCD docs mention that their callback address endpoint is `/auth/callback`. For ArgoWorkflows, the default format is `/oauth2/callback` as shown in [this comment](https://github.com/argoproj/argo-workflows/blob/93c11a24ff06049c2197149acd787f702e5c1f9b/docs/workflow-controller-configmap.yaml#L329) in the default values.yaml file in the helm chart.
+Firstly, configure the settings [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml) with the correct OAuth 2 values. If working towards an OIDC configuration the Argo CD project has [guides](https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/) on its similar (though different) process for setting up OIDC providers. It also includes examples for specific providers. The main difference is that the Argo CD docs mention that their callback address endpoint is `/auth/callback`. For Argo Workflows, the default format is `/oauth2/callback` as shown in [this comment](https://github.com/argoproj/argo-workflows/blob/93c11a24ff06049c2197149acd787f702e5c1f9b/docs/workflow-controller-configmap.yaml#L329) in the default `values.yaml` file in the helm chart.
-Next, create the k8s secrets for holding the OAuth2 `client-id` and `client-secret`. You may refer to the kubernetes documentation on [Managing secrets](https://kubernetes.io/docs/tasks/configmap-secret/). For example by using kubectl with literals:
-```
+Next, create the Kubernetes secrets for holding the OAuth2 `client-id` and `client-secret`. You may refer to the kubernetes documentation on [Managing secrets](https://kubernetes.io/docs/tasks/configmap-secret/). For example by using `kubectl` with literals:
+
+```bash
kubectl create secret -n argo generic client-id-secret \
--from-literal=client-id-key=foo
@@ -21,7 +20,7 @@ kubectl create secret -n argo generic client-secret-secret \
Then, start the Argo Server using the SSO [auth mode](argo-server-auth-mode.md):
-```
+```bash
argo server --auth-mode sso --auth-mode ...
```
@@ -31,11 +30,11 @@ argo server --auth-mode sso --auth-mode ...
As of v2.12 we issue a JWE token for users rather than give them the ID token from your OAuth2 provider. This token is opaque and has a longer expiry time (10h by default).
-The token encryption key is automatically generated by the Argo Server and stored in a Kubernetes secret name "sso".
+The token encryption key is automatically generated by the Argo Server and stored in a Kubernetes secret named `sso`.
You can revoke all tokens by deleting the encryption key and restarting the Argo Server (so it generates a new key).
-```
+```bash
kubectl delete secret sso
```
@@ -44,7 +43,6 @@ kubectl delete secret sso
All users will need to log in again. Sorry.
-
## SSO RBAC
> v2.12 and after
@@ -55,7 +53,7 @@ To allow service accounts to manage resources in other namespaces create a role
RBAC config is installation-level, so any changes will need to be made by the team that installed Argo. Many complex rules will be burdensome on that team.
-Firstly, enable the `rbac:` setting in [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml). You almost certainly want to be able configure RBAC using groups, so add `scopes:` to the SSO settings:
+Firstly, enable the `rbac:` setting in [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml). You almost certainly want to be able to configure RBAC using groups, so add `scopes:` to the SSO settings:
```yaml
sso:
@@ -96,35 +94,33 @@ metadata:
workflows.argoproj.io/rbac-rule-precedence: "1"
```
-
If no rule matches, we deny the user access.
-!!! Tip
- You'll probably want to configure a default account to use if no other rule matches, e.g. a read-only account, you can do this as follows:
+Tip: You'll probably want to configure a default account to use if no other rule matches, e.g. a read-only account. You can do this as follows:
- ```yaml
- metadata:
- name: read-only
- annotations:
- workflows.argoproj.io/rbac-rule: "true"
- workflows.argoproj.io/rbac-rule-precedence: "0"
- ```
+```yaml
+metadata:
+ name: read-only
+ annotations:
+ workflows.argoproj.io/rbac-rule: "true"
+ workflows.argoproj.io/rbac-rule-precedence: "0"
+```
- The precedence must be the lowest of all your service accounts.
+The precedence must be the lowest of all your service accounts.
## SSO RBAC Namespace Delegation
> v3.3 and after
You can optionally configure RBAC SSO per namespace.
-Typically, on organization has a K8s cluster and a central team manages the cluster who is the owner of the cluster. Along with this, there are multiple namespaces which are owned by individual team. This feature would help namespace owners to define RBAC for their own namespace.
+Typically, an organization has a Kubernetes cluster and a central team (the owner of the cluster) manages the cluster. Along with this, there are multiple namespaces which are owned by individual teams. This feature helps namespace owners define RBAC for their own namespace.
The feature is currently in beta.
To enable the feature, set env variable `SSO_DELEGATE_RBAC_TO_NAMESPACE=true` in your argo-server deployment.
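+
+For example, a minimal sketch (assuming the default `argo` installation namespace and deployment name):
+
+```bash
+kubectl set env deployment/argo-server -n argo SSO_DELEGATE_RBAC_TO_NAMESPACE=true
+```
+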
-#### Recommended usage
+### Recommended usage
-Configure a default account in the installation namespace which would allow all users of your organization. We will use this service account to allow a user to login to the cluster. You could optionally add workflow read-only role and rolebinding if you wish to.
+Configure a default account in the installation namespace which would allow all users of your organization. We will use this service account to allow a user to log in to the cluster. You could optionally add a workflow read-only role and role-binding if you wish to.
```yaml
apiVersion: v1
@@ -136,8 +132,8 @@ metadata:
workflows.argoproj.io/rbac-rule-precedence: "0"
```
-Now, for the the namespace that you own, configure a service account which would allow members of your team to perform operations in your namespace.
-Make sure that the precedence of the namespace service account is higher than the precedence of the login service account. Create approprite role that you want to grant to this serviceaccount and bind it with a role-binding.
+Now, for the namespace that you own, configure a service account which would allow members of your team to perform operations in your namespace.
+Make sure that the precedence of the namespace service account is higher than the precedence of the login service account. Create an appropriate role that you want to grant to this service account and bind it with a role-binding.
```yaml
apiVersion: v1
@@ -150,26 +146,27 @@ metadata:
workflows.argoproj.io/rbac-rule-precedence: "1"
```
-Using this, whenever a user is logged in via SSO and makes a request in 'my-namespace', and the `rbac-rule`matches, we will use this service account to allow the user to perform that operation in the namespace. If no serviceaccount matches in the namespace, the first serviceaccount(`user-default-login`) and its associated role will be used to perform the operation in the namespace.
+Using this, whenever a user is logged in via SSO and makes a request in `my-namespace`, and the `rbac-rule` matches, we will use this service account to allow the user to perform that operation in the namespace. If no service account matches in the namespace, the first service account (`user-default-login`) and its associated role will be used to perform the operation in the namespace.
## SSO Login Time
> v2.12 and after
-By default, your SSO session will expire after 10 hours. You can change this by adding a sessionExpiry value to your [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml) under the SSO heading.
+By default, your SSO session will expire after 10 hours. You can change this by adding a `sessionExpiry` value to your [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml) under the SSO heading.
```yaml
sso:
# Expiry defines how long your login is valid for in hours. (optional)
sessionExpiry: 240h
```
+
## Custom claims
> v3.1.4 and after
If your OIDC provider provides groups information with a claim name other than `groups`, you could configure the config-map to specify a custom claim name for groups. Argo now supports arbitrary custom claims and any claim can be used for `expr eval`. However, since group information is displayed in UI, it still needs to be an array of strings with group names as elements.
-customClaim in this case will be mapped to `groups` key and we can use the same key `groups` for evaluating our expressions
+The `customClaim` in this case will be mapped to the `groups` key and we can use the same key `groups` for evaluating our expressions.
```yaml
sso:
@@ -177,15 +174,16 @@ sso:
customGroupClaimName: argo_groups
```
-If your OIDC provider provides groups information only using the userInfo endpoint (e.g. OKta), you could configure `userInfoPath` to specify the user info endpoint that contains the groups claim.
+If your OIDC provider provides groups information only using the user-info endpoint (e.g. Okta), you could configure `userInfoPath` to specify the user info endpoint that contains the groups claim.
+
```yaml
sso:
userInfoPath: /oauth2/v1/userinfo
```
-#### Example expr
+### Example Expression
-```shell
+```bash
# assuming customClaimGroupName: argo_groups
workflows.argoproj.io/rbac-rule: "'argo_admins' in groups"
```
diff --git a/docs/argo-server.md b/docs/argo-server.md
index 84adcc79998c..eb53d1b294e0 100644
--- a/docs/argo-server.md
+++ b/docs/argo-server.md
@@ -1,7 +1,5 @@
# Argo Server
-
-
> v2.5 and after
!!! Warning "HTTP vs HTTPS"
@@ -31,12 +29,11 @@ Use this mode if:
To run locally:
-```
+```bash
argo server
```
-This will start a server on port 2746 which you can view at [https://localhost:2746](https://localhost:2746).
-
+This will start a server on port 2746 which you [can view](https://localhost:2746).
## Options
@@ -48,10 +45,10 @@ See [auth](argo-server-auth-mode.md).
See [managed namespace](managed-namespace.md).
-### Base href
+### Base HREF
-If the server is running behind reverse proxy with a subpath different from `/` (for example,
-`/argo`), you can set an alternative subpath with the `--base-href` flag or the `BASE_HREF`
+If the server is running behind reverse proxy with a sub-path different from `/` (for example,
+`/argo`), you can set an alternative sub-path with the `--base-href` flag or the `BASE_HREF`
environment variable.
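+
+For example, a sketch assuming the server is reachable under `/argo/` behind your reverse proxy (note the trailing slash):
+
+```bash
+argo server --base-href /argo/
+# or, equivalently
+BASE_HREF=/argo/ argo server
+```
+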
You probably now should [read how to set-up an ingress](#ingress)
@@ -60,9 +57,9 @@ You probably now should [read how to set-up an ingress](#ingress)
See [TLS](tls.md).
-### SSO
+### SSO
-See [SSO](argo-server-sso.md). See [here](argo-server-sso-argocd.md) about sharing ArgoCD's Dex with ArgoWorkflows.
+See [SSO](argo-server-sso.md). See [here](argo-server-sso-argocd.md) about sharing Argo CD's Dex with Argo Workflows.
## Access the Argo Workflows UI
@@ -71,27 +68,27 @@ following:
### `kubectl port-forward`
-```sh
+```bash
kubectl -n argo port-forward svc/argo-server 2746:2746
```
-Then visit: https://127.0.0.1:2746
-
+Then visit: https://127.0.0.1:2746
### Expose a `LoadBalancer`
Update the service to be of type `LoadBalancer`.
-```sh
+```bash
kubectl patch svc argo-server -n argo -p '{"spec": {"type": "LoadBalancer"}}'
```
Then wait for the external IP to be made available:
-```sh
+```bash
kubectl get svc argo-server -n argo
```
-```sh
+
+```bash
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
argo-server LoadBalancer 10.43.43.130 172.18.0.2 2746:30008/TCP 18h
```
@@ -102,7 +99,6 @@ You can get ingress working as follows:
Add `BASE_HREF` as environment variable to `deployment/argo-server`. Do not forget to add a trailing '/' character.
-
```yaml
---
apiVersion: apps/v1
@@ -133,7 +129,7 @@ Create a ingress, with the annotation `ingress.kubernetes.io/rewrite-target: /`:
>If TLS is enabled (default in v3.0 and after), the ingress controller must be told
>that the backend uses HTTPS. The method depends on the ingress controller, e.g.
->Traefik expects an `ingress.kubernetes.io/protocol` annotation, while ingress-nginx
+>Traefik expects an `ingress.kubernetes.io/protocol` annotation, while `ingress-nginx`
>uses `nginx.ingress.kubernetes.io/backend-protocol`
```yaml
@@ -157,15 +153,25 @@ spec:
[Learn more](https://github.com/argoproj/argo-workflows/issues/3080)
-
## Security
Users should consider the following in their set-up of the Argo Server:
### API Authentication Rate Limiting
-Argo Server does not perform authenticatinon directly. It delegates this to either the Kubernetes API Server (when `--auth-mode=client`) and the OAuth provider (when `--auth-mode=sso`). In each case, it is recommended that the delegate implements any authentication rate limiting you need.
+Argo Server does not perform authentication directly. It delegates this to either the Kubernetes API Server (when `--auth-mode=client`) and the OAuth provider (when `--auth-mode=sso`). In each case, it is recommended that the delegate implements any authentication rate limiting you need.
### IP Address Logging
Argo Server does not log the IP addresses of API requests. We recommend you put the Argo Server behind a load balancer, and that load balancer is configured to log the IP addresses of requests that return authentication or authorization errors.
+
+### Rate Limiting
+
+> v3.4 and after
+
+By default, the Argo Server rate limits API requests to 1000 per IP per minute. You can configure this with `--api-rate-limit`. You can access additional information through the following headers.
+
+* `X-Rate-Limit-Limit` - the rate limit ceiling that is applicable for the current request.
+* `X-Rate-Limit-Remaining` - the number of requests left for the current rate-limit window.
+* `X-Rate-Limit-Reset` - the time at which the rate limit resets, specified in UTC time.
+* `Retry-After` - indicates when a client should retry requests (when the rate limit expires), in UTC time.
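+
+For example, you can inspect these headers on any authenticated API response (a sketch; `$ARGO_TOKEN` is assumed to hold a valid token, and the values shown depend on your configuration):
+
+```bash
+# Make a request, discard the body, and print only the rate-limit related headers
+curl -sk -D - -o /dev/null \
+  -H "Authorization: $ARGO_TOKEN" \
+  https://localhost:2746/api/v1/version \
+  | grep -iE 'rate-limit|retry-after'
+```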
diff --git a/docs/artifact-repository-ref.md b/docs/artifact-repository-ref.md
index a06c834d56fe..02c5aa97f9e0 100644
--- a/docs/artifact-repository-ref.md
+++ b/docs/artifact-repository-ref.md
@@ -1,7 +1,5 @@
# Artifact Repository Ref
-
-
> v2.9 and after
You can reduce duplication in your templates by configuring repositories that can be accessed by any workflow. This can also remove sensitive information from your templates.
@@ -46,4 +44,4 @@ spec:
This feature gives maximum benefit when used with [key-only artifacts](key-only-artifacts.md).
-Reference: [fields.md#artifactrepositoryref](fields.md#artifactrepositoryref).
\ No newline at end of file
+[Reference](fields.md#artifactrepositoryref).
diff --git a/docs/artifact-visualization.md b/docs/artifact-visualization.md
new file mode 100644
index 000000000000..a0596485284b
--- /dev/null
+++ b/docs/artifact-visualization.md
@@ -0,0 +1,97 @@
+# Artifact Visualization
+
+> since v3.4
+
+Artifacts can be viewed in the UI.
+
+Use cases:
+
+* Comparing ML pipeline runs from generated charts.
+* Visualizing end results of ML pipeline runs.
+* Debugging workflows where visual artifacts are the most helpful.
+
+[Watch a demo](https://youtu.be/whoRfYY9Fhk)
+
+* Artifacts appear as elements in the workflow DAG that you can click on.
+* When you click on the artifact, a panel appears.
+* The first time the panel appears, explanatory text is shown to help you understand whether you might need to change your workflows to use this feature.
+* Known file types such as images, text or HTML are shown in an inline-frame (`iframe`).
+* Artifacts are sandboxed using a Content-Security-Policy that prevents JavaScript execution.
+* JSON is shown with syntax highlighting.
+
+To start, take a look at the [example](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml).
+
+## Artifact Types
+
+An artifact may be a `.tgz`, a file, or a directory.
+
+### `.tgz`
+
+Viewing of `.tgz` files is not supported in the UI. By default, artifacts are compressed as a `.tgz`; only artifacts that were not compressed can be viewed.
+
+To prevent compression, set `archive` to `none`:
+
+```yaml
+- name: artifact
+ # ...
+ archive:
+ none: { }
+```
+
+### File
+
+Files may be shown in the UI. To determine if a file can be shown, the UI checks if the artifact's file extension is supported. The extension is found in the artifact's key.
+
+To view a file, add the extension to the key:
+
+```yaml
+- name: single-file
+ s3:
+ key: visualization.png
+```
+
+### Directory
+
+Directories are shown in the UI. The UI considers any key with a trailing slash to be a directory.
+
+To view a directory, add a trailing slash to the key:
+
+```yaml
+- name: reports
+ s3:
+ key: reports/
+```
+
+If the directory contains `index.html`, then that will be shown; otherwise, a directory listing is displayed.
+
+⚠️ HTML files may contain CSS and images served from the same origin. Scripts are not allowed. Nothing may be remotely
+loaded.
+
+## Security
+
+### Content Security Policy
+
+We assume that artifacts are not trusted, so by default, artifacts are served with a `Content-Security-Policy` that
+disables JavaScript and remote files.
+
+This is similar to what happens when you include third-party scripts, such as analytic tracking, in your website.
+However, those tracking codes are normally served from a different domain than your main website. Artifacts are served
+from the same origin, so normal browser controls are not secure enough.
+
+### Sub-Path Access
+
+Previously, users could access the artifacts of any workflow they could access. To allow HTML files to link to other files within their tree, you can now also access any sub-path of the artifact's key.
+
+Example:
+
+A workflow produces a folder artifact in an S3 bucket named `my-bucket`, with the key `report/`. You can also access anything matching `report/*`.
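+
+As a sketch, such an output artifact might be declared like this (the `path` is illustrative; `archive: none` keeps the files viewable):
+
+```yaml
+- name: report
+  path: /tmp/report
+  archive:
+    none: { }
+  s3:
+    key: report/
+```
+
+Any file under that key, such as `report/index.html`, can then be accessed as well.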
diff --git a/docs/assets/alpha.svg b/docs/assets/alpha.svg
deleted file mode 100644
index 471179bef482..000000000000
--- a/docs/assets/alpha.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/docs/assets/argo-server-ui-configurations.png b/docs/assets/argo-server-ui-configurations.png
new file mode 100644
index 000000000000..a6d84f0ac3e8
Binary files /dev/null and b/docs/assets/argo-server-ui-configurations.png differ
diff --git a/docs/assets/artifact-gc-option-2-flow.jpg b/docs/assets/artifact-gc-option-2-flow.jpg
new file mode 100644
index 000000000000..7954fc90210b
Binary files /dev/null and b/docs/assets/artifact-gc-option-2-flow.jpg differ
diff --git a/docs/assets/artifact-gc-proposal.pptx b/docs/assets/artifact-gc-proposal.pptx
new file mode 100644
index 000000000000..a78102316960
Binary files /dev/null and b/docs/assets/artifact-gc-proposal.pptx differ
diff --git a/docs/assets/beta.svg b/docs/assets/beta.svg
deleted file mode 100644
index 29b44088563f..000000000000
--- a/docs/assets/beta.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/docs/assets/ecosystem.png b/docs/assets/ecosystem.png
deleted file mode 100644
index 721373b24e9a..000000000000
Binary files a/docs/assets/ecosystem.png and /dev/null differ
diff --git a/docs/assets/ga.svg b/docs/assets/ga.svg
deleted file mode 100644
index 3424357bace2..000000000000
--- a/docs/assets/ga.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/docs/assets/graph-report.png b/docs/assets/graph-report.png
new file mode 100644
index 000000000000..be845902da1d
Binary files /dev/null and b/docs/assets/graph-report.png differ
diff --git a/docs/assets/katacoda.png b/docs/assets/katacoda.png
deleted file mode 100644
index 0b053ea3126a..000000000000
Binary files a/docs/assets/katacoda.png and /dev/null differ
diff --git a/docs/assets/make-start-UI-true.png b/docs/assets/make-start-UI-true.png
new file mode 100644
index 000000000000..1b0a0a04a5fb
Binary files /dev/null and b/docs/assets/make-start-UI-true.png differ
diff --git a/docs/assets/test-report.png b/docs/assets/test-report.png
new file mode 100644
index 000000000000..ca204ec9f6b4
Binary files /dev/null and b/docs/assets/test-report.png differ
diff --git a/docs/async-pattern.md b/docs/async-pattern.md
index 70b98716a16a..5f463902ec12 100644
--- a/docs/async-pattern.md
+++ b/docs/async-pattern.md
@@ -2,20 +2,20 @@
## Introduction
-If triggering an external job (eg an Amazon EMR job) from Argo that does not run to completion in a container, there are two options:
+If you trigger an external job (e.g. an Amazon EMR job) from Argo that does not run to completion in a container, there are two options:
- create a container that polls the external job completion status
-- combine a trigger step that starts the job with a `Suspend` step that is unsuspended by an API call to Argo when the external job is complete.
+- combine a trigger step that starts the job with a `Suspend` step that is resumed by an API call to Argo when the external job is complete.
This document describes the second option in more detail.
## The pattern
-The pattern involves two steps - the first step is a short-running step that triggers a long-running job outside Argo (eg an HTTP submission), and the second step is a `Suspend` step that suspends workflow execution and is ultimately either resumed or stopped (ie failed) via a call to the Argo API when the job outside Argo succeeds or fails.
+The pattern involves two steps - the first step is a short-running step that triggers a long-running job outside Argo (e.g. an HTTP submission), and the second step is a `Suspend` step that suspends workflow execution and is ultimately either resumed or stopped (i.e. failed) via a call to the Argo API when the job outside Argo succeeds or fails.
When implemented as a `WorkflowTemplate` it can look something like this:
-```
+```yaml
apiVersion: argoproj.io/v1alpha1
kind: WorkflowTemplate
metadata:
@@ -56,13 +56,13 @@ spec:
suspend: {}
```
-In this case the ```job-cmd``` parameter can be a command that makes an http call via curl to an endpoint that returns a job uuid. More sophisticated submission and parsing of submission output could be done with something like a Python script step.
+In this case, the `job-cmd` parameter can be a command that makes an HTTP call via curl to an endpoint that returns a job UUID. More sophisticated submission and parsing of submission output could be done with something like a Python script step.
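+
+For illustration, such a command might look like the following (the endpoint and response shape are hypothetical):
+
+```bash
+# Submit the external job and print the returned job UUID (hypothetical endpoint)
+curl -s -X POST https://jobs.example.com/api/submit \
+  -H 'content-type: application/json' \
+  -d '{"input": "my-data"}' \
+  | jq -r '.uuid'
+```
+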
On job completion the external job would need to call either resume if successful:
You may need an [access token](access-token.md).
-```
+```bash
curl --request PUT \
--url https://localhost:2746/api/v1/workflows///resume
--header 'content-type: application/json' \
@@ -76,7 +76,7 @@ curl --request PUT \
or stop if unsuccessful:
-```
+```bash
curl --request PUT \
--url https://localhost:2746/api/v1/workflows///stop
--header 'content-type: application/json' \
@@ -93,15 +93,8 @@ curl --request PUT \
Using `argo retry` on failed jobs that follow this pattern will cause Argo to re-attempt the Suspend step without re-triggering the job.
-Instead you need to use the `--restart-successful` option, eg if using the template from above:
+Instead, you need to use the `--restart-successful` option, e.g. if using the template from above:
-```
+```bash
argo retry --restart-successful --node-field-selector templateRef.template=run-external-job,phase=Failed
```
-
-See also:
-
-* [access token](access-token.md)
-* [resuming a workflow via automation](resuming-workflow-via-automation.md)
-* [submitting a workflow via automation](submit-workflow-via-automation.md)
-* [one workflow submitting another](workflow-submitting-workflow.md)
diff --git a/docs/cli.md b/docs/cli.md
deleted file mode 100644
index 8927bfe2e832..000000000000
--- a/docs/cli.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# CLI
-
-The CLI allows to (amongst other things) submit, watch, and list workflows, e.g.:
-
-```sh
-argo submit my-wf.yaml
-argo list
-```
-
-## Reference
-
-You can find [detailed reference here](cli/argo.md)
-
-## Help
-
-Most help topics are provided by built-in help:
-
-```
-argo --help
-```
-
-## Argo Server
-
-You'll need to configure your commands to use the Argo Server if you have [offloaded node status](offloading-large-workflows.md) or are trying to access your [workflow archive](workflow-archive.md).
-
-To do so, set the `ARGO_SERVER` environment variable, e.g.:
-
-```
-export ARGO_SERVER=localhost:2746
-```
-
-See [TLS](tls.md).
\ No newline at end of file
diff --git a/docs/cli/argo.md b/docs/cli/argo.md
index 0e9b9acab9a5..890da67d4f44 100644
--- a/docs/cli/argo.md
+++ b/docs/cli/argo.md
@@ -90,6 +90,7 @@ argo [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
@@ -106,6 +107,7 @@ argo [flags]
* [argo auth](argo_auth.md) - manage authentication settings
* [argo cluster-template](argo_cluster-template.md) - manipulate cluster workflow templates
* [argo completion](argo_completion.md) - output shell completion code for the specified shell (bash or zsh)
+* [argo cp](argo_cp.md) - copy artifacts from workflow
* [argo cron](argo_cron.md) - manage cron workflows
* [argo delete](argo_delete.md) - delete workflows
* [argo executor-plugin](argo_executor-plugin.md) - manage executor plugins
diff --git a/docs/cli/argo_archive.md b/docs/cli/argo_archive.md
index f935546ef05f..2375b7ff3a3f 100644
--- a/docs/cli/argo_archive.md
+++ b/docs/cli/argo_archive.md
@@ -35,6 +35,7 @@ argo archive [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
@@ -54,4 +55,5 @@ argo archive [flags]
* [argo archive list-label-keys](argo_archive_list-label-keys.md) - list workflows label keys in the archive
* [argo archive list-label-values](argo_archive_list-label-values.md) - get workflow label values in the archive
* [argo archive resubmit](argo_archive_resubmit.md) - resubmit one or more workflows
+* [argo archive retry](argo_archive_retry.md) - retry zero or more workflows
diff --git a/docs/cli/argo_archive_delete.md b/docs/cli/argo_archive_delete.md
index bc2783932e5a..8381976b866a 100644
--- a/docs/cli/argo_archive_delete.md
+++ b/docs/cli/argo_archive_delete.md
@@ -35,6 +35,7 @@ argo archive delete UID... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_archive_get.md b/docs/cli/argo_archive_get.md
index 7ba33a801b51..9568d375eca8 100644
--- a/docs/cli/argo_archive_get.md
+++ b/docs/cli/argo_archive_get.md
@@ -36,6 +36,7 @@ argo archive get UID [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_archive_list-label-keys.md b/docs/cli/argo_archive_list-label-keys.md
index 7706d6117a8c..f46da1a97622 100644
--- a/docs/cli/argo_archive_list-label-keys.md
+++ b/docs/cli/argo_archive_list-label-keys.md
@@ -35,6 +35,7 @@ argo archive list-label-keys [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_archive_list-label-values.md b/docs/cli/argo_archive_list-label-values.md
index f4f6249da9f3..e41582333515 100644
--- a/docs/cli/argo_archive_list-label-values.md
+++ b/docs/cli/argo_archive_list-label-values.md
@@ -36,6 +36,7 @@ argo archive list-label-values [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_archive_list.md b/docs/cli/argo_archive_list.md
index 2448e1c55c03..f02e35109ff7 100644
--- a/docs/cli/argo_archive_list.md
+++ b/docs/cli/argo_archive_list.md
@@ -38,6 +38,7 @@ argo archive list [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_archive_resubmit.md b/docs/cli/argo_archive_resubmit.md
index 9c1c6976c2b2..0ce78536b3c9 100644
--- a/docs/cli/argo_archive_resubmit.md
+++ b/docs/cli/argo_archive_resubmit.md
@@ -15,27 +15,27 @@ argo archive resubmit [WORKFLOW...] [flags]
# Resubmit multiple workflows:
- argo resubmit uid another-uid
+ argo archive resubmit uid another-uid
# Resubmit multiple workflows by label selector:
- argo resubmit -l workflows.argoproj.io/test=true
+ argo archive resubmit -l workflows.argoproj.io/test=true
# Resubmit multiple workflows by field selector:
- argo resubmit --field-selector metadata.namespace=argo
+ argo archive resubmit --field-selector metadata.namespace=argo
# Resubmit and wait for completion:
- argo resubmit --wait uid
+ argo archive resubmit --wait uid
# Resubmit and watch until completion:
- argo resubmit --watch uid
+ argo archive resubmit --watch uid
# Resubmit and tail logs until completion:
- argo resubmit --log uid
+ argo archive resubmit --log uid
```
@@ -47,6 +47,7 @@ argo archive resubmit [WORKFLOW...] [flags]
--log log the workflow until it completes
--memoized re-use successful steps & outputs from the previous run
-o, --output string Output format. One of: name|json|yaml|wide
+ -p, --parameter stringArray input parameter to override on the original workflow spec
--priority int32 workflow priority
-l, --selector string Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
-w, --wait wait for the workflow to complete, only works when a single workflow is resubmitted
@@ -76,6 +77,7 @@ argo archive resubmit [WORKFLOW...] [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_archive_retry.md b/docs/cli/argo_archive_retry.md
new file mode 100644
index 000000000000..710bdbbbda9f
--- /dev/null
+++ b/docs/cli/argo_archive_retry.md
@@ -0,0 +1,94 @@
+## argo archive retry
+
+retry zero or more workflows
+
+```
+argo archive retry [WORKFLOW...] [flags]
+```
+
+### Examples
+
+```
+# Retry a workflow:
+
+ argo archive retry uid
+
+# Retry multiple workflows:
+
+ argo archive retry uid another-uid
+
+# Retry multiple workflows by label selector:
+
+ argo archive retry -l workflows.argoproj.io/test=true
+
+# Retry multiple workflows by field selector:
+
+ argo archive retry --field-selector metadata.namespace=argo
+
+# Retry and wait for completion:
+
+ argo archive retry --wait uid
+
+# Retry and watch until completion:
+
+ argo archive retry --watch uid
+
+# Retry and tail logs until completion:
+
+ argo archive retry --log uid
+
+```
+
+### Options
+
+```
+ --field-selector string Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
+ -h, --help help for retry
+ --log log the workflow until it completes
+ --node-field-selector string selector of nodes to reset, eg: --node-field-selector inputs.paramaters.myparam.value=abc
+ -o, --output string Output format. One of: name|json|yaml|wide
+ -p, --parameter stringArray input parameter to override on the original workflow spec
+ --restart-successful indicates to restart successful nodes matching the --node-field-selector
+ -l, --selector string Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
+ -w, --wait wait for the workflow to complete, only works when a single workflow is retried
+ --watch watch the workflow until it completes, only works when a single workflow is retried
+```
+
+### Options inherited from parent commands
+
+```
+ --argo-base-href string An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
+ --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
+ -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
+ --as string Username to impersonate for the operation
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --gloglevel int Set the glog logging level
+ -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
+ --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
+ --kubeconfig string Path to a kube config. Only required if out-of-cluster
+ --loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
+ --server string The address and port of the Kubernetes API server
+ --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+ --username string Username for basic authentication to the API server
+ -v, --verbose Enabled verbose logging, i.e. --loglevel debug
+```
+
+### SEE ALSO
+
+* [argo archive](argo_archive.md) - manage the workflow archive
+
diff --git a/docs/cli/argo_auth.md b/docs/cli/argo_auth.md
index 4e035a0e6c33..e7ca7f211418 100644
--- a/docs/cli/argo_auth.md
+++ b/docs/cli/argo_auth.md
@@ -35,6 +35,7 @@ argo auth [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_auth_token.md b/docs/cli/argo_auth_token.md
index a507b8034a9f..28c320441e6c 100644
--- a/docs/cli/argo_auth_token.md
+++ b/docs/cli/argo_auth_token.md
@@ -35,6 +35,7 @@ argo auth token [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cluster-template.md b/docs/cli/argo_cluster-template.md
index 73317ba79daf..5c6c8a5405c9 100644
--- a/docs/cli/argo_cluster-template.md
+++ b/docs/cli/argo_cluster-template.md
@@ -35,6 +35,7 @@ argo cluster-template [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cluster-template_create.md b/docs/cli/argo_cluster-template_create.md
index 495b650896eb..4b8d7a40d89b 100644
--- a/docs/cli/argo_cluster-template_create.md
+++ b/docs/cli/argo_cluster-template_create.md
@@ -37,6 +37,7 @@ argo cluster-template create FILE1 FILE2... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cluster-template_delete.md b/docs/cli/argo_cluster-template_delete.md
index b5341da37782..99c8c2f9c5bc 100644
--- a/docs/cli/argo_cluster-template_delete.md
+++ b/docs/cli/argo_cluster-template_delete.md
@@ -36,6 +36,7 @@ argo cluster-template delete WORKFLOW_TEMPLATE [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cluster-template_get.md b/docs/cli/argo_cluster-template_get.md
index 064a7b9cdbb6..89c297589cb9 100644
--- a/docs/cli/argo_cluster-template_get.md
+++ b/docs/cli/argo_cluster-template_get.md
@@ -36,6 +36,7 @@ argo cluster-template get CLUSTER WORKFLOW_TEMPLATE... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cluster-template_lint.md b/docs/cli/argo_cluster-template_lint.md
index 547dcc9030e9..49b6a79ad9c5 100644
--- a/docs/cli/argo_cluster-template_lint.md
+++ b/docs/cli/argo_cluster-template_lint.md
@@ -37,6 +37,7 @@ argo cluster-template lint FILE... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cluster-template_list.md b/docs/cli/argo_cluster-template_list.md
index b52018039b67..9526874ab320 100644
--- a/docs/cli/argo_cluster-template_list.md
+++ b/docs/cli/argo_cluster-template_list.md
@@ -36,6 +36,7 @@ argo cluster-template list [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_completion.md b/docs/cli/argo_completion.md
index c3eb79a3741f..136e4abdb49e 100644
--- a/docs/cli/argo_completion.md
+++ b/docs/cli/argo_completion.md
@@ -48,6 +48,7 @@ argo completion SHELL [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cp.md b/docs/cli/argo_cp.md
new file mode 100644
index 000000000000..6cb6bcd0548c
--- /dev/null
+++ b/docs/cli/argo_cp.md
@@ -0,0 +1,69 @@
+## argo cp
+
+copy artifacts from workflow
+
+```
+argo cp my-wf output-directory ... [flags]
+```
+
+### Examples
+
+```
+# Copy a workflow's artifacts to a local output directory:
+
+ argo cp my-wf output-directory
+
+# Copy artifacts from a specific node in a workflow to a local output directory:
+
+ argo cp my-wf output-directory --node-id=my-wf-node-id-123
+
+```
+
+### Options
+
+```
+ --artifact-name string name of output artifact in workflow
+ -h, --help help for cp
+ --node-id string id of node in workflow
+ --path string use variables {workflowName}, {nodeId}, {templateName}, {artifactName}, and {namespace} to create a customized path to store the artifacts; example: {workflowName}/{templateName}/{artifactName} (default "{namespace}/{workflowName}/{nodeId}/outputs/{artifactName}")
+ --template-name string name of template in workflow
+```
+
+### Options inherited from parent commands
+
+```
+ --argo-base-href string An path to use with HTTP client (e.g. due to BASE_HREF). Defaults to the ARGO_BASE_HREF environment variable.
+ --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
+ -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
+ --as string Username to impersonate for the operation
+ --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
+ --as-uid string UID to impersonate for the operation
+ --certificate-authority string Path to a cert file for the certificate authority
+ --client-certificate string Path to a client certificate file for TLS
+ --client-key string Path to a client key file for TLS
+ --cluster string The name of the kubeconfig cluster to use
+ --context string The name of the kubeconfig context to use
+ --gloglevel int Set the glog logging level
+ -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
+ --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
+ --kubeconfig string Path to a kube config. Only required if out-of-cluster
+ --loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
+ -n, --namespace string If present, the namespace scope for this CLI request
+ --password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
+ --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+ -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
+ --server string The address and port of the Kubernetes API server
+ --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
+ --token string Bearer token for authentication to the API server
+ --user string The name of the kubeconfig user to use
+ --username string Username for basic authentication to the API server
+ -v, --verbose Enabled verbose logging, i.e. --loglevel debug
+```
+
+### SEE ALSO
+
+* [argo](argo.md) - argo is the command line interface to Argo
+
diff --git a/docs/cli/argo_cron.md b/docs/cli/argo_cron.md
index ef40dcd9816d..b4807f0382da 100644
--- a/docs/cli/argo_cron.md
+++ b/docs/cli/argo_cron.md
@@ -39,6 +39,7 @@ argo cron [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cron_create.md b/docs/cli/argo_cron_create.md
index b6d260c3ef31..4964e04ef43b 100644
--- a/docs/cli/argo_cron_create.md
+++ b/docs/cli/argo_cron_create.md
@@ -45,6 +45,7 @@ argo cron create FILE1 FILE2... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cron_delete.md b/docs/cli/argo_cron_delete.md
index 00fd42543523..14352346708f 100644
--- a/docs/cli/argo_cron_delete.md
+++ b/docs/cli/argo_cron_delete.md
@@ -36,6 +36,7 @@ argo cron delete [CRON_WORKFLOW... | --all] [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cron_get.md b/docs/cli/argo_cron_get.md
index d004f8703325..73badb4f7af9 100644
--- a/docs/cli/argo_cron_get.md
+++ b/docs/cli/argo_cron_get.md
@@ -36,6 +36,7 @@ argo cron get CRON_WORKFLOW... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cron_lint.md b/docs/cli/argo_cron_lint.md
index d03e32860450..85768db3cc27 100644
--- a/docs/cli/argo_cron_lint.md
+++ b/docs/cli/argo_cron_lint.md
@@ -37,6 +37,7 @@ argo cron lint FILE... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cron_list.md b/docs/cli/argo_cron_list.md
index db7161180d5d..09416e1fd673 100644
--- a/docs/cli/argo_cron_list.md
+++ b/docs/cli/argo_cron_list.md
@@ -37,6 +37,7 @@ argo cron list [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cron_resume.md b/docs/cli/argo_cron_resume.md
index 54b4fdc728c1..d02698edef36 100644
--- a/docs/cli/argo_cron_resume.md
+++ b/docs/cli/argo_cron_resume.md
@@ -35,6 +35,7 @@ argo cron resume [CRON_WORKFLOW...] [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_cron_suspend.md b/docs/cli/argo_cron_suspend.md
index 23cffda09004..78ccaf259cf5 100644
--- a/docs/cli/argo_cron_suspend.md
+++ b/docs/cli/argo_cron_suspend.md
@@ -35,6 +35,7 @@ argo cron suspend CRON_WORKFLOW... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_delete.md b/docs/cli/argo_delete.md
index c679bd9b740e..f1e2bada46ca 100644
--- a/docs/cli/argo_delete.md
+++ b/docs/cli/argo_delete.md
@@ -3,7 +3,7 @@
delete workflows
```
-argo delete [--dry-run] [WORKFLOW...|[--all] [--older] [--completed] [--resubmitted] [--prefix PREFIX] [--selector SELECTOR]] [flags]
+argo delete [--dry-run] [WORKFLOW...|[--all] [--older] [--completed] [--resubmitted] [--prefix PREFIX] [--selector SELECTOR] [--force] ] [flags]
```
### Examples
@@ -26,7 +26,8 @@ argo delete [--dry-run] [WORKFLOW...|[--all] [--older] [--completed] [--resubmit
-A, --all-namespaces Delete workflows from all namespaces
--completed Delete completed workflows
--dry-run Do not delete the workflow, only print what would happen
- --field-selector string Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selectorkey1=value1,key2=value2). The server only supports a limited number of field queries per type.
+ --field-selector string Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
+ --force Force delete workflows by removing finalizers
-h, --help help for delete
--older string Delete completed workflows finished before the specified duration (e.g. 10m, 3h, 1d)
--prefix string Delete workflows by prefix
@@ -57,6 +58,7 @@ argo delete [--dry-run] [WORKFLOW...|[--all] [--older] [--completed] [--resubmit
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_executor-plugin.md b/docs/cli/argo_executor-plugin.md
index 5846f6eb4cac..df57b63e03cc 100644
--- a/docs/cli/argo_executor-plugin.md
+++ b/docs/cli/argo_executor-plugin.md
@@ -35,6 +35,7 @@ argo executor-plugin [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_executor-plugin_build.md b/docs/cli/argo_executor-plugin_build.md
index ce2b34a4227b..6134508d21ac 100644
--- a/docs/cli/argo_executor-plugin_build.md
+++ b/docs/cli/argo_executor-plugin_build.md
@@ -35,6 +35,7 @@ argo executor-plugin build DIR [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_get.md b/docs/cli/argo_get.md
index b79dc96e5839..72fc870492d3 100644
--- a/docs/cli/argo_get.md
+++ b/docs/cli/argo_get.md
@@ -52,6 +52,7 @@ argo get WORKFLOW... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_lint.md b/docs/cli/argo_lint.md
index 8b4431daa9b4..3bab859412c3 100644
--- a/docs/cli/argo_lint.md
+++ b/docs/cli/argo_lint.md
@@ -52,6 +52,7 @@ argo lint FILE... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_list.md b/docs/cli/argo_list.md
index 11cea8093726..d8d619c3b182 100644
--- a/docs/cli/argo_list.md
+++ b/docs/cli/argo_list.md
@@ -12,7 +12,7 @@ argo list [flags]
-A, --all-namespaces Show workflows from all namespaces
--chunk-size int Return large lists in chunks rather than all at once. Pass 0 to disable.
--completed Show completed workflows. Mutually exclusive with --running.
- --field-selector string Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selectorkey1=value1,key2=value2). The server only supports a limited number of field queries per type.
+ --field-selector string Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
-h, --help help for list
--no-headers Don't print headers (default print headers).
--older string List completed workflows finished before the specified duration (e.g. 10m, 3h, 1d)
@@ -48,6 +48,7 @@ argo list [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_logs.md b/docs/cli/argo_logs.md
index 14d0c595006b..fc9f9ffb8a64 100644
--- a/docs/cli/argo_logs.md
+++ b/docs/cli/argo_logs.md
@@ -77,6 +77,7 @@ argo logs WORKFLOW [POD] [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_node.md b/docs/cli/argo_node.md
index 18c2952c806f..d291e1eccf19 100644
--- a/docs/cli/argo_node.md
+++ b/docs/cli/argo_node.md
@@ -52,6 +52,7 @@ argo node ACTION WORKFLOW FLAGS [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_resubmit.md b/docs/cli/argo_resubmit.md
index 2f260acc0597..0d32b3e27e35 100644
--- a/docs/cli/argo_resubmit.md
+++ b/docs/cli/argo_resubmit.md
@@ -51,6 +51,7 @@ argo resubmit [WORKFLOW...] [flags]
--log log the workflow until it completes
--memoized re-use successful steps & outputs from the previous run
-o, --output string Output format. One of: name|json|yaml|wide
+ -p, --parameter stringArray input parameter to override on the original workflow spec
--priority int32 workflow priority
-l, --selector string Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
-w, --wait wait for the workflow to complete, only works when a single workflow is resubmitted
@@ -80,6 +81,7 @@ argo resubmit [WORKFLOW...] [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_resume.md b/docs/cli/argo_resume.md
index 95be9120d8be..70c400ccad09 100644
--- a/docs/cli/argo_resume.md
+++ b/docs/cli/argo_resume.md
@@ -48,6 +48,7 @@ argo resume WORKFLOW1 WORKFLOW2... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_retry.md b/docs/cli/argo_retry.md
index ca40841d65de..dd8670100255 100644
--- a/docs/cli/argo_retry.md
+++ b/docs/cli/argo_retry.md
@@ -51,6 +51,7 @@ argo retry [WORKFLOW...] [flags]
--log log the workflow until it completes
--node-field-selector string selector of nodes to reset, eg: --node-field-selector inputs.paramaters.myparam.value=abc
-o, --output string Output format. One of: name|json|yaml|wide
+ -p, --parameter stringArray input parameter to override on the original workflow spec
--restart-successful indicates to restart successful nodes matching the --node-field-selector
-l, --selector string Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
-w, --wait wait for the workflow to complete, only works when a single workflow is retried
@@ -80,6 +81,7 @@ argo retry [WORKFLOW...] [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_server.md b/docs/cli/argo_server.md
index 7d101d8a11c7..562133e1e66d 100644
--- a/docs/cli/argo_server.md
+++ b/docs/cli/argo_server.md
@@ -17,6 +17,8 @@ See https://argoproj.github.io/argo-workflows/argo-server/
```
--access-control-allow-origin string Set Access-Control-Allow-Origin header in HTTP responses.
+ --allowed-link-protocol stringArray Allowed link protocol in configMap. Used if the allowed configMap links protocol are different from http,https. Defaults to the environment variable ALLOWED_LINK_PROTOCOL (default [http,https])
+ --api-rate-limit uint Set limit per IP for api ratelimiter (default 1000)
--auth-mode stringArray API server authentication mode. Any 1 or more length permutation of: client,server,sso (default [client])
--basehref string Value for base href in index.html. Used if the server is running behind reverse proxy under subpath different from /. Defaults to the environment variable BASE_HREF. (default "/")
-b, --browser enable automatic launching of the browser [local mode]
@@ -30,7 +32,6 @@ See https://argoproj.github.io/argo-workflows/argo-server/
--managed-namespace string namespace that watches, default to the installation namespace
--namespaced run as namespaced mode
-p, --port int Port to listen on (default 2746)
- --sso-namespace string namespace that will be used for SSO RBAC. Defaults to installation namespace. Used only in namespaced mode
--x-frame-options string Set X-Frame-Options header in HTTP responses. (default "DENY")
```
@@ -57,6 +58,7 @@ See https://argoproj.github.io/argo-workflows/argo-server/
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_stop.md b/docs/cli/argo_stop.md
index 95818ce0783b..c39c73b9d33a 100644
--- a/docs/cli/argo_stop.md
+++ b/docs/cli/argo_stop.md
@@ -61,6 +61,7 @@ argo stop WORKFLOW WORKFLOW2... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_submit.md b/docs/cli/argo_submit.md
index d943b03eac54..823c427efa6b 100644
--- a/docs/cli/argo_submit.md
+++ b/docs/cli/argo_submit.md
@@ -79,6 +79,7 @@ argo submit [FILE... | --from `kind/name] [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_suspend.md b/docs/cli/argo_suspend.md
index a83352431403..daa8119ff96b 100644
--- a/docs/cli/argo_suspend.md
+++ b/docs/cli/argo_suspend.md
@@ -47,6 +47,7 @@ argo suspend WORKFLOW1 WORKFLOW2... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_template.md b/docs/cli/argo_template.md
index a56eb17b6d94..bbfc6a438c8d 100644
--- a/docs/cli/argo_template.md
+++ b/docs/cli/argo_template.md
@@ -35,6 +35,7 @@ argo template [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_template_create.md b/docs/cli/argo_template_create.md
index 048f702a8660..953f408f3be8 100644
--- a/docs/cli/argo_template_create.md
+++ b/docs/cli/argo_template_create.md
@@ -37,6 +37,7 @@ argo template create FILE1 FILE2... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_template_delete.md b/docs/cli/argo_template_delete.md
index 22cad2abc9c9..74c8077b4494 100644
--- a/docs/cli/argo_template_delete.md
+++ b/docs/cli/argo_template_delete.md
@@ -36,6 +36,7 @@ argo template delete WORKFLOW_TEMPLATE [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_template_get.md b/docs/cli/argo_template_get.md
index 68f65264f743..ee0f42f4751c 100644
--- a/docs/cli/argo_template_get.md
+++ b/docs/cli/argo_template_get.md
@@ -36,6 +36,7 @@ argo template get WORKFLOW_TEMPLATE... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_template_lint.md b/docs/cli/argo_template_lint.md
index c3f9898507e2..65ac604d8a77 100644
--- a/docs/cli/argo_template_lint.md
+++ b/docs/cli/argo_template_lint.md
@@ -37,6 +37,7 @@ argo template lint (DIRECTORY | FILE1 FILE2 FILE3...) [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_template_list.md b/docs/cli/argo_template_list.md
index 5bf7d97d5852..a1dbf6aa5397 100644
--- a/docs/cli/argo_template_list.md
+++ b/docs/cli/argo_template_list.md
@@ -37,6 +37,7 @@ argo template list [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_terminate.md b/docs/cli/argo_terminate.md
index 1e986ff3770b..463c84c38b02 100644
--- a/docs/cli/argo_terminate.md
+++ b/docs/cli/argo_terminate.md
@@ -31,7 +31,7 @@ argo terminate WORKFLOW WORKFLOW2... [flags]
```
--dry-run Do not terminate the workflow, only print what would happen
- --field-selector string Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selectorkey1=value1,key2=value2). The server only supports a limited number of field queries per type.
+ --field-selector string Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.
-h, --help help for terminate
-l, --selector string Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
```
@@ -59,6 +59,7 @@ argo terminate WORKFLOW WORKFLOW2... [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_version.md b/docs/cli/argo_version.md
index 5325fdcaa648..a780cc7483d6 100644
--- a/docs/cli/argo_version.md
+++ b/docs/cli/argo_version.md
@@ -36,6 +36,7 @@ argo version [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_wait.md b/docs/cli/argo_wait.md
index 359e6c7fd231..76d5d459afc6 100644
--- a/docs/cli/argo_wait.md
+++ b/docs/cli/argo_wait.md
@@ -49,6 +49,7 @@ argo wait [WORKFLOW...] [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/cli/argo_watch.md b/docs/cli/argo_watch.md
index a3b307c8e092..3ff9c999a5c6 100644
--- a/docs/cli/argo_watch.md
+++ b/docs/cli/argo_watch.md
@@ -50,6 +50,7 @@ argo watch WORKFLOW [flags]
--loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
-n, --namespace string If present, the namespace scope for this CLI request
--password string Password for basic authentication to the API server
+ --proxy-url string If provided, this URL will be used to connect via proxy
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
-e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
--server string The address and port of the Kubernetes API server
diff --git a/docs/client-libraries.md b/docs/client-libraries.md
index e30203281244..15b62236ecf9 100644
--- a/docs/client-libraries.md
+++ b/docs/client-libraries.md
@@ -11,18 +11,17 @@ Client libraries often handle common tasks such as authentication for you.
The following client libraries are officially maintained by the Argo team.
-| Language | Client Library | Examples/Docs |
-|----------|----------------|---------------|
-| Golang | [apiclient.go](https://github.com/argoproj/argo-workflows/blob/master/pkg/apiclient/apiclient.go) | [Example](https://github.com/argoproj/argo-workflows/blob/master/cmd/argo/commands/submit.go)
-| Java | [Java](https://github.com/argoproj/argo-workflows/blob/master/sdks/java) | |
-| Python | [Python](https://github.com/argoproj/argo-workflows/blob/master/sdks/python) | [Examples](https://github.com/argoproj/argo-workflows/tree/master/sdks/python/examples)/[Docs](https://github.com/argoproj/argo-workflows/tree/master/sdks/python/client/docs) |
+| Language | Client Library | Examples/Docs |
+|----------|-----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------|
+| Golang | [`apiclient.go`](https://github.com/argoproj/argo-workflows/blob/master/pkg/apiclient/apiclient.go) | [Example](https://github.com/argoproj/argo-workflows/blob/master/cmd/argo/commands/submit.go) |
+| Java | [Java](https://github.com/argoproj/argo-workflows/blob/master/sdks/java) | |
+| Python | [Python](https://github.com/argoproj/argo-workflows/blob/master/sdks/python) | |
## Community-maintained client libraries
The following client libraries are provided and maintained by their authors, not the Argo team.
-| Language | Client Library | Examples/Docs |
-|----------|----------------|---------------|
-| Python | [Couler](https://github.com/couler-proj/couler) | Multi-workflow engine support Python SDK |
-| Python | [Hera](https://github.com/argoproj-labs/hera-workflows) | Easy and accessible Argo workflows construction and submission in Python |
-
+| Language | Client Library | Examples/Docs |
+|----------|---------------------------------------------------------|--------------------------------------------------------------------------|
+| Python | [Couler](https://github.com/couler-proj/couler) | Multi-workflow engine support Python SDK |
+| Python | [Hera](https://github.com/argoproj-labs/hera-workflows) | Easy and accessible Argo workflows construction and submission in Python |
diff --git a/docs/cluster-workflow-templates.md b/docs/cluster-workflow-templates.md
index fe6541df4078..a56070f61909 100644
--- a/docs/cluster-workflow-templates.md
+++ b/docs/cluster-workflow-templates.md
@@ -4,8 +4,8 @@
## Introduction
-`ClusterWorkflowTemplates` are cluster scoped `WorkflowTemplates`. `ClusterWorkflowTemplate`
-can be created cluster scoped like `ClusterRole` and can be accessed all namespaces in the cluster.
+`ClusterWorkflowTemplates` are cluster scoped `WorkflowTemplates`. `ClusterWorkflowTemplate`
+can be created cluster scoped like `ClusterRole` and can be accessed across all namespaces in the cluster.
`WorkflowTemplates` documentation [link](./workflow-templates.md)
@@ -30,11 +30,11 @@ spec:
## Referencing other `ClusterWorkflowTemplates`
-You can reference `templates` from another `ClusterWorkflowTemplates` using a `templateRef` field with `clusterScope: true` .
+You can reference `templates` from other `ClusterWorkflowTemplates` using a `templateRef` field with `clusterScope: true`.
Just as how you reference other `templates` within the same `Workflow`, you should do so from a `steps` or `dag` template.
Here is an example:
-More examples []()
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -55,18 +55,22 @@ spec:
- name: message
value: "hello world"
```
+
> 2.9 and after
-#### Create `Workflow` from `ClusterWorkflowTemplate` Spec
-You can create `Workflow` from `ClusterWorkflowTemplate` spec using `workflowTemplateRef` with `clusterScope: true`. If you pass the arguments to created `Workflow`, it will be merged with ClusterWorkflowTemplate arguments
+
+### Create `Workflow` from `ClusterWorkflowTemplate` Spec
+
+You can create a `Workflow` from a `ClusterWorkflowTemplate` spec using `workflowTemplateRef` with `clusterScope: true`. If you pass arguments to the created `Workflow`, they will be merged with the cluster workflow template arguments
Here is an example for `ClusterWorkflowTemplate` with `entrypoint` and `arguments`
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: ClusterWorkflowTemplate
metadata:
name: cluster-workflow-template-submittable
spec:
- entryPoint: whalesay-template
+ entrypoint: whalesay-template
arguments:
parameters:
- name: message
@@ -82,7 +86,9 @@ spec:
args: ["{{inputs.parameters.message}}"]
```
+
Here is an example for creating `ClusterWorkflowTemplate` as Workflow with passing `entrypoint` and `arguments` to `ClusterWorkflowTemplate`
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -100,6 +106,7 @@ spec:
```
Here is an example of a creating `WorkflowTemplate` as Workflow and using `WorkflowTemplates`'s `entrypoint` and `Workflow Arguments`
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -112,28 +119,27 @@ spec:
```
-
-
## Managing `ClusterWorkflowTemplates`
### CLI
You can create some example templates as follows:
-```
+```bash
argo cluster-template create https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/cluster-workflow-template/clustertemplates.yaml
```
The submit a workflow using one of those templates:
-```
+```bash
argo submit https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml
```
> 2.7 and after
>
The submit a `ClusterWorkflowTemplate` as a `Workflow`:
-```sh
+
+```bash
argo submit --from clusterworkflowtemplate/workflow-template-submittable
```
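For reference, a minimal `Workflow` that consumes the `ClusterWorkflowTemplate` shown above via `workflowTemplateRef` might look like the sketch below (it assumes the `cluster-workflow-template-submittable` template from the earlier example; the parameter value is illustrative):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: cluster-workflow-template-hello-world-
spec:
  # Reference a cluster-scoped template rather than a namespaced one.
  workflowTemplateRef:
    name: cluster-workflow-template-submittable
    clusterScope: true
  arguments:
    parameters:
      - name: message
        value: "hello from a ClusterWorkflowTemplate"
```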
diff --git a/docs/conditional-artifacts-parameters.md b/docs/conditional-artifacts-parameters.md
index 2a10d6a166a5..3b33c843f520 100644
--- a/docs/conditional-artifacts-parameters.md
+++ b/docs/conditional-artifacts-parameters.md
@@ -33,7 +33,7 @@ under step/DAG level output parameter. Both use the
## Conditional Parameters
-```yaml
+```yaml
- name: coinflip
steps:
- - name: flip-coin
@@ -61,12 +61,12 @@ Convenient functions added to support more use cases:
1. `asInt` - convert the string to integer (e.g: `asInt('1')`)
2. `asFloat` - convert the string to Float (e.g: `asFloat('1.23')`)
3. `string` - convert the int/float to string (e.g: `string(1)`)
-4. `jsonpath` - Extract the element from Json using jsonpath (
+4. `jsonpath` - Extract the element from JSON using JSON Path (
e.g: `jsonpath('{"employee":{"name":"sonoo","salary":56000,"married":true}}", "$.employee.name" )` )
5. [Sprig](http://masterminds.github.io/sprig/) - Support all `sprig` functions
* [Advanced example: fibonacci Sequence](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/fibonacci-seq-conditional-param.yaml)
-!!! NOTE
+!!! NOTE
Expressions will decode the `-` as operator if template name has `-`, it will fail the expression. So here solution
for template name which has `-` in its name. `step['one-two-three'].outputs.artifacts`
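As a hedged illustration of the bracket-notation workaround for hyphenated template names (reusing the `flip-coin`, `heads`, and `tails` step names from the coinflip example above), a step-level output parameter could be written as:

```yaml
outputs:
  parameters:
    - name: stepresult
      valueFrom:
        # Bracket notation is required here because the step name contains '-'.
        expression: "steps['flip-coin'].outputs.result == 'heads' ? steps.heads.outputs.result : steps.tails.outputs.result"
```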
diff --git a/docs/configure-archive-logs.md b/docs/configure-archive-logs.md
index f6b6f66fb5e5..023f66366c4b 100644
--- a/docs/configure-archive-logs.md
+++ b/docs/configure-archive-logs.md
@@ -1,25 +1,27 @@
# Configuring Archive Logs
-To enable automatic pipeline logging, you need to configure ***archiveLogs*** at workflow-controller configmap, workflow spec, or template level. You also need to configure [Artifact Repository](configure-artifact-repository.md) to define where this logging artifact is stored.
+⚠️ We do not recommend you rely on Argo Workflows to archive logs. Instead, use a conventional Kubernetes logging facility.
+
+To enable automatic pipeline logging, you need to configure `archiveLogs` at workflow-controller config-map, workflow spec, or template level. You also need to configure [Artifact Repository](configure-artifact-repository.md) to define where this logging artifact is stored.
Archive logs follows priorities:
workflow-controller config (on) > workflow spec (on/off) > template (on/off)
-| Controller Configmap | Workflow Spec | Template | are we archiving logs? |
-|---|---|---|---|
-| true | true | true | true |
-| true | true | false | true |
-| true | false | true | true |
-| true | false | false | true |
-| false | true | true | true |
-| false | true | false | false |
-| false | false | true | true |
-| false | false | false | false |
+| Controller Config Map | Workflow Spec | Template | are we archiving logs? |
+|-----------------------|---------------|----------|------------------------|
+| true | true | true | true |
+| true | true | false | true |
+| true | false | true | true |
+| true | false | false | true |
+| false | true | true | true |
+| false | true | false | false |
+| false | false | true | true |
+| false | false | false | false |
-## Configuring Workflow Controller Configmap
+## Configuring Workflow Controller Config Map
-See [Workflow Controller Configmap](workflow-controller-configmap.md)
+See [Workflow Controller Config Map](workflow-controller-configmap.md)
## Configuring Workflow Spec
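A minimal sketch of enabling archiving for a whole workflow by setting `archiveLogs` in the workflow spec (the image and command are placeholders):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: archive-logs-
spec:
  # Archive the main container logs of this workflow's pods to the artifact repository.
  archiveLogs: true
  entrypoint: whalesay
  templates:
    - name: whalesay
      container:
        image: docker/whalesay:latest
        command: [cowsay]
        args: ["hello world"]
```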
diff --git a/docs/configure-artifact-repository.md b/docs/configure-artifact-repository.md
index 8cb109215d8b..41a442a4eff6 100644
--- a/docs/configure-artifact-repository.md
+++ b/docs/configure-artifact-repository.md
@@ -2,12 +2,13 @@
To run Argo workflows that use artifacts, you must configure and use an artifact
repository. Argo supports any S3 compatible artifact repository such as AWS, GCS
-and Minio. This section shows how to configure the artifact repository.
+and MinIO. This section shows how to configure the artifact repository.
Subsequent sections will show how to use it.
| Name | Inputs | Outputs | Usage (Feb 2020) |
|---|---|---|---|
| Artifactory | Yes | Yes | 11% |
+| Azure Blob | Yes | Yes | - |
| GCS | Yes | Yes | - |
| Git | Yes | No | - |
| HDFS | Yes | Yes | 3% |
@@ -19,39 +20,41 @@ Subsequent sections will show how to use it.
The actual repository used by a workflow is chosen by the following rules:
1. Anything explicitly configured using [Artifact Repository Ref](artifact-repository-ref.md). This is the most flexible, safe, and secure option.
-2. From a config map named `artifact-repositories` if it has the `workflows.argoproj.io/default-artifact-repository` annotation in the workflow's namespace.
-3. From a workflow controller configmap.
+2. From a config map named `artifact-repositories` if it has the `workflows.argoproj.io/default-artifact-repository` annotation in the workflow's namespace.
+3. From a workflow controller config-map.
-## Configuring Minio
+## Configuring MinIO
-```
-$ brew install helm # mac, helm 3.x
-$ helm repo add minio https://helm.min.io/ # official minio Helm charts
-$ helm repo update
-$ helm install argo-artifacts minio/minio --set service.type=LoadBalancer --set fullnameOverride=argo-artifacts
+NOTE: MinIO is already included in the [quick-start manifests](quick-start.md).
+
+```bash
+brew install helm # mac, helm 3.x
+helm repo add minio https://helm.min.io/ # official minio Helm charts
+helm repo update
+helm install argo-artifacts minio/minio --set service.type=LoadBalancer --set fullnameOverride=argo-artifacts
```
-Login to the Minio UI using a web browser (port 9000) after obtaining the
+Log in to the MinIO UI using a web browser (port 9000) after obtaining the
external IP using `kubectl`.
-```
-$ kubectl get service argo-artifacts
+```bash
+kubectl get service argo-artifacts
```
On Minikube:
-```
-$ minikube service --url argo-artifacts
+```bash
+minikube service --url argo-artifacts
```
-NOTE: When minio is installed via Helm, it generates
+NOTE: When MinIO is installed via Helm, it generates
credentials, which you will use to login to the UI:
Use the commands shown below to see the credentials
-- AccessKey: kubectl get secret argo-artifacts -o jsonpath='{.data.accesskey}' | base64 --decode
-- SecretKey: kubectl get secret argo-artifacts -o jsonpath='{.data.secretkey}' | base64 --decode
+- `AccessKey`: `kubectl get secret argo-artifacts -o jsonpath='{.data.accesskey}' | base64 --decode`
+- `SecretKey`: `kubectl get secret argo-artifacts -o jsonpath='{.data.secretkey}' | base64 --decode`
-Create a bucket named `my-bucket` from the Minio UI.
+Create a bucket named `my-bucket` from the MinIO UI.
## Configuring AWS S3
@@ -62,7 +65,7 @@ an access key, you will need to create a user with just the permissions you want
to associate with the access key. Otherwise, you can just create an access key
using your existing user account.
-```
+```bash
$ export mybucket=bucket249
$ cat > policy.json < policy.json < access-key.json
```
+If you have Artifact Garbage Collection configured, you should also add "s3:DeleteObject" to the list of Actions above.
+
NOTE: if you want argo to figure out which region your buckets belong in, you
must additionally set the following statement policy. Otherwise, you must
specify a bucket region in your workflow configuration.
-```
- ...
+```json
{
"Effect":"Allow",
"Action":[
@@ -104,7 +109,7 @@ specify a bucket region in your workflow configuration.
## Configuring GCS (Google Cloud Storage)
Create a bucket from the GCP Console
-(https://console.cloud.google.com/storage/browser).
+(<https://console.cloud.google.com/storage/browser>).
There are 2 ways to configure a Google Cloud Storage.
@@ -135,10 +140,10 @@ artifacts:
```
If it's a GKE cluster, and Workload Identity is configured, there's no need to
-create the Service Account key and store it as a K8s secret,
+create the service account key and store it as a Kubernetes secret,
`serviceAccountKeySecret` is also not needed in this case. Please follow the
link to configure Workload Identity
-(https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity).
+(<https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity>).
### Use S3 APIs
@@ -146,7 +151,7 @@ Enable S3 compatible access and create an access key. Note that S3 compatible
access is on a per project rather than per bucket basis.
- Navigate to Storage > Settings
- (https://console.cloud.google.com/storage/settings).
+ (<https://console.cloud.google.com/storage/settings>).
- Enable interoperability access if needed.
- Create a new key if needed.
- Configure `s3` artifact as following example.
@@ -173,61 +178,162 @@ artifacts:
## Configuring Alibaba Cloud OSS (Object Storage Service)
-To configure artifact storage for Alibaba Cloud OSS, please first follow
-the [official documentation](https://www.alibabacloud.com/product/oss) to set up
-an OSS account and bucket.
+Create your bucket and an access key for the bucket. We suggest limiting the permissions
+of the access key: create a user with just the permissions you
+want to associate with the access key. Otherwise, you can just create an access key
+using your existing user account.
-Once it's set up, you can find endpoint and bucket
-information on your OSS dashboard and then use them like the following to
-configure the artifact storage for your workflow:
+Set up the [Alibaba Cloud CLI](https://www.alibabacloud.com/help/en/alibaba-cloud-cli/latest/product-introduction)
+and follow the steps to configure the artifact storage for your workflow:
-```yaml
-artifacts:
- - name: my-art
- path: /my-artifact
+```bash
+$ export mybucket=bucket-workflow-artifect
+$ export myregion=cn-zhangjiakou
+$ # limit permission to read/write the bucket.
+$ cat > policy.json < access-key.json
+$ # create secret in demo namespace, replace demo with your namespace.
+$ kubectl create secret generic $mybucket-credentials -n demo\
+ --from-literal "accessKey=$(cat access-key.json | jq -r .AccessKey.AccessKeyId)" \
+ --from-literal "secretKey=$(cat access-key.json | jq -r .AccessKey.AccessKeySecret)"
+$ # create a config map to configure the default artifact repository for a namespace.
+$ cat > default-artifact-repository.yaml << EOF
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ # If you want to use this config map by default, name it "artifact-repositories". Otherwise, you can provide a reference to a
+ # different config map in `artifactRepositoryRef.configMap`.
+ name: artifact-repositories
+ annotations:
+ # v3.0 and after - if you want to use a specific key, put that key into this annotation.
+ workflows.argoproj.io/default-artifact-repository: default-oss-artifact-repository
+data:
+ default-oss-artifact-repository: |
oss:
- endpoint: http://oss-cn-hangzhou-zmf.aliyuncs.com
- bucket: test-bucket-name
- key: test/mydirectory/ # this is path in the bucket
+ endpoint: http://oss-cn-zhangjiakou-internal.aliyuncs.com
+ bucket: $mybucket
# accessKeySecret and secretKeySecret are secret selectors.
- # It references the k8s secret named 'my-oss-credentials'.
+ # It references the k8s secret named 'bucket-workflow-artifect-credentials'.
# This secret is expected to have have the keys 'accessKey'
# and 'secretKey', containing the base64 encoded credentials
# to the bucket.
accessKeySecret:
- name: my-oss-credentials
+ name: $mybucket-credentials
key: accessKey
secretKeySecret:
- name: my-oss-credentials
+ name: $mybucket-credentials
key: secretKey
+EOF
+# create cm in demo namespace, replace demo with your namespace.
+$ kubectl apply -f default-artifact-repository.yaml -n demo
```
You can also set `createBucketIfNotPresent` to `true` to tell the artifact driver to automatically create the OSS bucket if it doesn't exist yet when saving artifacts. Note that you'll need to set additional permission for your OSS account to create new buckets.
-# Configure the Default Artifact Repository
+## Configuring Azure Blob Storage
+
+Create an Azure Storage account and a container within that account. There are a number of
+ways to accomplish this, including the [Azure Portal](https://portal.azure.com) or the
+[CLI](https://docs.microsoft.com/en-us/cli/azure/).
+
+1. Retrieve the blob service endpoint for the storage account. For example:
+
+ ```bash
+ az storage account show -n mystorageaccountname --query 'primaryEndpoints.blob' -otsv
+ ```
+
+2. Retrieve the access key for the storage account. For example:
+
+ ```bash
+ az storage account keys list -n mystorageaccountname --query '[0].value' -otsv
+ ```
+
+3. Create a Kubernetes secret to hold the storage account key. For example:
+
+ ```bash
+ kubectl create secret generic my-azure-storage-credentials \
+ --from-literal "account-access-key=$(az storage account keys list -n mystorageaccountname --query '[0].value' -otsv)"
+ ```
+
+4. Configure the `azure` artifact as in the following YAML.
+
+```yaml
+artifacts:
+ - name: message
+ path: /tmp/message
+ azure:
+ endpoint: https://mystorageaccountname.blob.core.windows.net
+ container: my-container-name
+ blob: path/in/container
+ # accountKeySecret is a secret selector.
+ # It references the k8s secret named 'my-azure-storage-credentials'.
+ # This secret is expected to have the key 'account-access-key',
+ # containing the base64 encoded credentials to the storage account.
+ #
+ # If a managed identity has been assigned to the machines running the
+ # workflow (e.g., https://docs.microsoft.com/en-us/azure/aks/use-managed-identity)
+ # then accountKeySecret is not needed, and useSDKCreds should be
+ # set to true instead:
+ # useSDKCreds: true
+ accountKeySecret:
+ name: my-azure-storage-credentials
+ key: account-access-key
+```
+
+If `useSDKCreds` is set to `true`, then the `accountKeySecret` value is not
+used and authentication with Azure will be attempted using a
+[`DefaultAzureCredential`](https://docs.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication)
+instead.
+
+## Configure the Default Artifact Repository
In order for Argo to use your artifact repository, you can configure it as the
default repository. Edit the workflow-controller config map with the correct
endpoint and access/secret keys for your repository.
-## S3 compatible artifact repository bucket (such as AWS, GCS, Minio, and Alibaba Cloud OSS)
+### S3 compatible artifact repository bucket (such as AWS, GCS, MinIO, and Alibaba Cloud OSS)
Use the `endpoint` corresponding to your provider:
-- AWS: s3.amazonaws.com
-- GCS: storage.googleapis.com
-- Minio: my-minio-endpoint.default:9000
-- Alibaba Cloud OSS: oss-cn-hangzhou-zmf.aliyuncs.com
+- AWS: `s3.amazonaws.com`
+- GCS: `storage.googleapis.com`
+- MinIO: `my-minio-endpoint.default:9000`
+- Alibaba Cloud OSS: `oss-cn-hangzhou-zmf.aliyuncs.com`
The `key` is name of the object in the `bucket` The `accessKeySecret` and
`secretKeySecret` are secret selectors that reference the specified kubernetes
-secret. The secret is expected to have the keys 'accessKey' and 'secretKey',
-containing the base64 encoded credentials to the bucket.
+secret. The secret is expected to have the keys `accessKey` and `secretKey`,
+containing the `base64` encoded credentials to the bucket.
For AWS, the `accessKeySecret` and `secretKeySecret` correspond to
`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` respectively.
-EC2 provides a metadata API via which applications using the AWS SDK may assume
+EC2 provides a meta-data API via which applications using the AWS SDK may assume
IAM roles associated with the instance. If you are running argo on EC2 and the
instance role allows access to your S3 bucket, you can configure the workflow
step pods to assume the role. To do so, simply omit the `accessKeySecret` and
@@ -238,20 +344,20 @@ can be obtained from the GCP Console. Note that S3 compatible access is on a per
project rather than per bucket basis.
- Navigate to Storage > Settings
- (https://console.cloud.google.com/storage/settings).
+ (<https://console.cloud.google.com/storage/settings>).
- Enable interoperability access if needed.
- Create a new key if needed.
-For Minio, the `accessKeySecret` and `secretKeySecret` naturally correspond the
-AccessKey and SecretKey.
+For MinIO, the `accessKeySecret` and `secretKeySecret` naturally correspond to the
+`AccessKey` and `SecretKey`.
For Alibaba Cloud OSS, the `accessKeySecret` and `secretKeySecret` corresponds to
`accessKeyID` `and accessKeySecret` respectively.
Example:
-```
-$ kubectl edit configmap workflow-controller-configmap -n argo # assumes argo was installed in the argo namespace
+```bash
+$ kubectl edit configmap workflow-controller-configmap -n argo # assumes argo was installed in the argo namespace
...
data:
artifactRepository: |
@@ -272,16 +378,16 @@ data:
The secrets are retrieved from the namespace you use to run your workflows. Note
that you can specify a `keyFormat`.
-## Google Cloud Storage (GCS)
+### Google Cloud Storage (GCS)
Argo also can use native GCS APIs to access a Google Cloud Storage bucket.
-`serviceAccountKeySecret` references to a k8 secret which stores a Google Cloud
+`serviceAccountKeySecret` references a Kubernetes secret which stores a Google Cloud
service account key to access the bucket.
Example:
-```
+```bash
$ kubectl edit configmap workflow-controller-configmap -n argo # assumes argo was installed in the argo namespace
...
data:
@@ -294,7 +400,29 @@ data:
key: serviceAccountKey
```
-# Accessing Non-Default Artifact Repositories
+### Azure Blob Storage
+
+Argo can use native Azure APIs to access an Azure Blob Storage container.
+
+`accountKeySecret` references a Kubernetes secret which stores an Azure Blob
+Storage account shared key to access the container.
+
+Example:
+
+```bash
+$ kubectl edit configmap workflow-controller-configmap -n argo # assumes argo was installed in the argo namespace
+...
+data:
+ artifactRepository: |
+ azure:
+ container: my-container
+ blobNameFormat: prefix/in/container #optional, it could reference workflow variables, such as "{{workflow.name}}/{{pod.name}}"
+ accountKeySecret:
+ name: my-azure-storage-credentials
+ key: account-access-key
+```
+
+## Accessing Non-Default Artifact Repositories
This section shows how to access artifacts from non-default artifact
repositories.
@@ -302,7 +430,7 @@ repositories.
The `endpoint`, `accessKeySecret` and `secretKeySecret` are the same as for
configuring the default artifact repository described previously.
-```
+```yaml
templates:
- name: artifact-example
inputs:
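            # A hedged sketch of how the input artifact above might be completed for an
            # S3-compatible repository; the bucket, key, and secret names below are
            # illustrative assumptions rather than values taken from these docs.
      artifacts:
        - name: my-art
          path: /my-artifact
          s3:
            endpoint: s3.amazonaws.com
            bucket: my-bucket-name
            key: path/in/bucket
            accessKeySecret:
              name: my-s3-credentials
              key: accessKey
            secretKeySecret:
              name: my-s3-credentials
              key: secretKey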
diff --git a/docs/container-set-template.md b/docs/container-set-template.md
index 5c63624bdf9d..4e792986bd94 100644
--- a/docs/container-set-template.md
+++ b/docs/container-set-template.md
@@ -1,7 +1,5 @@
# Container Set Template
-
-
> v3.1 and after
A container set templates is similar to a normal container or script template, but allows you to specify multiple
@@ -40,7 +38,7 @@ spec:
parameters:
- name: message
valueFrom:
- path: /workpsace/message
+ path: /workspace/message
```
There are a couple of caveats:
@@ -68,11 +66,11 @@ Instead, have a workspace volume and make sure all artifacts paths are on that v
## ⚠️ Resource Requests
-A container set actually starts all containers, and the Emmissary only starts the main container process when the containers it depends on have completed. This mean that even though the container is doing no useful work, it is still consume resources and you're still getting billed for them.
+A container set actually starts all containers, and the Emissary only starts the main container process when the containers it depends on have completed. This means that even though the container is doing no useful work, it is still consuming resources and you're still getting billed for them.
If your requests are small, this won't be a problem.
-If your request are large, set the resource requests so the sum total is the most you'll need at once.
+If your requests are large, set the resource requests so the sum total is the most you'll need at once.
Example A: a simple sequence e.g. `a -> b -> c`
@@ -109,7 +107,6 @@ Example B: Lopsided requests, e.g. `a -> b` where `a` is cheap and `b` is expens
* `a` needs 100 cpu, 1Mi memory, runs for 10h
* `b` needs 8Ki GPU, 100 Gi memory, 200 Ki GPU, runs for 5m
-Can you see the problem here? `a` only wont small requests, but the container set will use the total of all requests. So it's as if you're using all that GPU for 10h. This will be expensive.
+Can you see the problem here? `a` only has small requests, but the container set will use the total of all requests. So it's as if you're using all that GPU for 10h. This will be expensive.
Solution: do not use container set when you have lopsided requests.
-
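To make the dependency behavior discussed above concrete, here is a hedged, minimal container set sketch in which `b` only starts after `a` completes (the images and the shared `workspace` volume are illustrative):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: container-set-
spec:
  entrypoint: main
  templates:
    - name: main
      volumes:
        - name: workspace
          emptyDir: {}
      containerSet:
        # All containers share the same pod and the same workspace volume.
        volumeMounts:
          - name: workspace
            mountPath: /workspace
        containers:
          - name: a
            image: argoproj/argosay:v2
          - name: b
            image: argoproj/argosay:v2
            dependencies:
              - a   # b's process is only started once a has completed
```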
diff --git a/docs/core-concepts.md b/docs/core-concepts.md
deleted file mode 100644
index 171a96cc5d27..000000000000
--- a/docs/core-concepts.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Core Concepts
-
-!!! note
- Please read [Kubernetes concepts](https://kubernetes.io/docs/concepts/) first.
-
-* **Workflow**: a Kubernetes resource defining the execution of one or more **template**. Workflows are named.
-* **Template**: a **step**, **steps** or **dag**.
-* **Step**: a single step of a **workflow**, typically run a container based on **inputs** and capture the **outputs**.
-* **Steps**: a list of **steps**
-* **Entrypoint**: the first **step** to execute when running a **workflow**
-* **Node**: a step
-* **Directed Acyclic Graph (DAG)**: a set of **steps** (nodes) and the dependencies (edges) between them.
-* **Workflow Template**: a Kubernetes resource defining a reusable workflow for a namespace
-* **Cluster Workflow Template**: a Kubernetes resource defining a reusable workflow for a cluster
-* **Inputs**: **parameters** and **artifacts** passed to the **step**,
-* **Outputs**: **parameters** and **artifacts** outputted by a **step**
-* **Parameters**: objects, strings, booleans, arrays
-* **Artifacts**: files saved by a container
-* **Artifact Repository**: a place where **artifacts** are stored
-* **Executor**: the method to execute a container, e.g. Docker, PNS ([learn more](workflow-executors.md))
-* **Workflow Service Account**: the service account that a workflow is executed as ([learn more](service-accounts.md))
diff --git a/docs/cost-optimisation.md b/docs/cost-optimisation.md
index 46ca2e4fc208..4391beeb735d 100644
--- a/docs/cost-optimisation.md
+++ b/docs/cost-optimisation.md
@@ -1,16 +1,16 @@
-# Cost Optimisation
+# Cost Optimization
-## User Cost Optimisations
+## User Cost Optimizations
Suggestions for users running workflows.
-### Set The Workflows Pod Resource Requests
+### Set The Workflows Pod Resource Requests
-> Suitable if you are running a workflow with many homogenous pods.
+> Suitable if you are running a workflow with many homogeneous pods.
[Resource duration](resource-duration.md) shows the amount of CPU and memory requested by a pod and is indicative of the cost. You can use this to find costly steps within your workflow.
-Smaller requests can be set in the pod spec patch's [resource requirements](fields.md#resourcerequirements).
+Smaller requests can be set in the pod spec patch's [resource requirements](fields.md#resourcerequirements).
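As a hedged sketch (the names and request values are illustrative), smaller requests could be applied to every pod in a workflow with a `podSpecPatch` on the workflow spec:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: small-requests-
spec:
  entrypoint: main
  # Strategic merge patch applied to each pod created by the workflow.
  podSpecPatch: |
    containers:
      - name: main
        resources:
          requests:
            cpu: 50m
            memory: 32Mi
  templates:
    - name: main
      container:
        image: argoproj/argosay:v2
```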
## Use A Node Selector To Use Cheaper Instances
@@ -25,22 +25,23 @@ nodeSelector:
> Suitable if you have a workflow that passes a lot of artifacts within itself.
-Copying artifacts to and from storage outside of a cluster can be expensive. The correct choice is dependent on your artifact storage provider is vs. what volume they are using. For example, we believe it may be more expensive to allocate and delete a new block storage volume (AWS EBS, GCP persistent disk) every workflow using the PVC feature, than it is to upload and download some small files to object storage (AWS S3, GCP cloud storage).
+Copying artifacts to and from storage outside of a cluster can be expensive. The correct choice is dependent on what your artifact storage provider is vs. what volume you are using. For example, we believe it may be more expensive to allocate and delete a new block storage volume (AWS EBS, GCP persistent disk) every workflow using the PVC feature, than it is to upload and download some small files to object storage (AWS S3, GCP cloud storage).
-On the other hand if they are using a NFS volume shared between all their workflows with large artifacts, that might be cheaper than the data transfer and storage costs of object storage.
+On the other hand if you are using a NFS volume shared between all your workflows with large artifacts, that might be cheaper than the data transfer and storage costs of object storage.
Consider:
* Data transfer costs (upload/download vs. copying)
* Data storage costs (object storage vs. volume)
* Requirement for parallel access to data (NFS vs. block storage vs. artifact)
+
### Limit The Total Number Of Workflows And Pods
> Suitable for all.
-A workflow (and for that matter, any Kubernetes resource) will incur a cost as long as they exist in your cluster, even after they are no longer running.
+A workflow (and for that matter, any Kubernetes resource) will incur a cost as long as it exists in your cluster, even after it's no longer running.
-The workflow controller memory and CPU needs increase linearly with the number of pods and workflows you are currently running.
+The workflow controller memory and CPU needs to increase linearly with the number of pods and workflows you are currently running.
You should delete workflows once they are no longer needed, or enable a [Workflow Archive](workflow-archive.md) and you can still view them after they are removed from Kubernetes.
@@ -68,7 +69,7 @@ You can set these configurations globally using [Default Workflow Spec](default-
Changing these settings will not delete workflows that have already run. To list old workflows:
-```
+```bash
argo list --completed --since 7d
```
@@ -76,12 +77,12 @@ argo list --completed --since 7d
To list/delete workflows completed over 7 days ago:
-```
+```bash
argo list --older 7d
argo delete --older 7d
```
-## Operator Cost Optimisations
+## Operator Cost Optimizations
Suggestions for operators who installed Argo Workflows.
@@ -89,7 +90,7 @@ Suggestions for operators who installed Argo Workflows.
> Suitable if you have many instances, e.g. on dozens of clusters or namespaces.
-Set a resource requests and limits for the `workflow-controller` and `argo-server`, e.g.
+Set resource requests and limits for the `workflow-controller` and `argo-server`, e.g.
```yaml
requests:
diff --git a/docs/cron-backfill.md b/docs/cron-backfill.md
index 8eade87f28c5..87d7f93bbea1 100644
--- a/docs/cron-backfill.md
+++ b/docs/cron-backfill.md
@@ -2,7 +2,7 @@
## Use Case
-* You are using cron workflows to run daily jobs, you may need to re-run for a date, or run some historical days.
+* You are using cron workflows to run daily jobs; you may need to re-run them for a particular date, or run them for some historical days.
## Solution
@@ -16,4 +16,3 @@ This [full example](https://raw.githubusercontent.com/argoproj/argo-workflows/ma
* A cron workflow named `daily-job`.
* A workflow named `backfill-v1` that uses a resource template to create one workflow for each backfill date.
* A alternative workflow named `backfill-v2` that uses a steps templates to run one task for each backfill date.
-
diff --git a/docs/cron-workflows.md b/docs/cron-workflows.md
index b13a82269c39..4ab3517ea3c5 100644
--- a/docs/cron-workflows.md
+++ b/docs/cron-workflows.md
@@ -1,12 +1,10 @@
# Cron Workflows
-
-
> v2.5 and after
## Introduction
-`CronWorkflow` are workflows that run on a preset schedule. They are designed to be converted from `Workflow` easily and to mimick the same options as Kubernetes `CronJob`. In essence, `CronWorkflow` = `Workflow` + some specific cron options.
+`CronWorkflows` are workflows that run on a preset schedule. They are designed to be converted from `Workflow` easily and to mimic the same options as Kubernetes `CronJob`. In essence, `CronWorkflow` = `Workflow` + some specific cron options.
## `CronWorkflow` Spec
@@ -35,7 +33,7 @@ spec:
`CronWorkflow.spec.workflowSpec` is the same type as `Workflow.spec` and serves as a template for `Workflow` objects that are created from it. Everything under this spec will be converted to a `Workflow`.
-The resuling `Workflow` name will be a generated name based on the `CronWorkflow` name. In this example it could be something like `test-cron-wf-tj6fe`.
+The resulting `Workflow` name will be a generated name based on the `CronWorkflow` name. In this example it could be something like `test-cron-wf-tj6fe`.
`CronWorkflow.spec.workflowMetadata` can be used to add `labels` and `annotations`.
@@ -43,7 +41,7 @@ The resuling `Workflow` name will be a generated name based on the `CronWorkflow
| Option Name | Default Value | Description |
|:----------------------------:|:----------------------:|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `schedule` | None, must be provided | Schedule at which the `Workflow` will be run. E.g. `5 4 * * * ` |
+| `schedule` | None, must be provided | Schedule at which the `Workflow` will be run. E.g. `5 4 * * *` |
| `timezone` | Machine timezone | Timezone during which the Workflow will be run from the IANA timezone standard, e.g. `America/Los_Angeles` |
| `suspend` | `false` | If `true` Workflow scheduling will not occur. Can be set from the CLI, GitOps, or directly |
| `concurrencyPolicy` | `Allow` | Policy that determines what to do if multiple `Workflows` are scheduled at the same time. Available options: `Allow`: allow all, `Replace`: remove all old before scheduling a new, `Forbid`: do not allow any new while there are old |
@@ -51,6 +49,12 @@ The resuling `Workflow` name will be a generated name based on the `CronWorkflow
| `successfulJobsHistoryLimit` | `3` | Number of successful `Workflows` that will be persisted at a time |
| `failedJobsHistoryLimit` | `1` | Number of failed `Workflows` that will be persisted at a time |
+### Cron Schedule Syntax
+
+The cron scheduler uses the standard cron syntax, as [documented on Wikipedia](https://en.wikipedia.org/wiki/Cron).
+
+More detailed documentation for the specific library used is [available here](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).
+
### Crash Recovery
If the `workflow-controller` crashes (and hence the `CronWorkflow` controller), there are some options you can set to ensure that `CronWorkflows` that would have been scheduled while the controller was down can still run. Mainly `startingDeadlineSeconds` can be set to specify the maximum number of seconds past the last successful run of a `CronWorkflow` during which a missed run will still be executed.
@@ -65,7 +69,7 @@ This setting can also be configured in tandem with `concurrencyPolicy` to achiev
Daylight Saving (DST) is taken into account when using timezone. This means that, depending on the local time of the scheduled job, argo will schedule the workflow once, twice, or not at all when the clock moves forward or back.
-For example, with timezone set at `America/Los_Angeles`, we have daylight saving
+For example, with timezone set at `America/Los_Angeles`, we have daylight saving
- +1 hour (DST start) at 2020-03-08 02:00:00:
@@ -73,13 +77,13 @@ For example, with timezone set at `America/Los_Angeles`, we have daylight saving
| cron | sequence | workflow execution time |
|------------|----------|-------------------------------|
- | 59 1 * * * | 1 | 2020-03-08 01:59:00 -0800 PST |
+ | `59 1 * * *` | 1 | 2020-03-08 01:59:00 -0800 PST |
| | 2 | 2020-03-09 01:59:00 -0700 PDT |
| | 3 | 2020-03-10 01:59:00 -0700 PDT |
- | 0 2 * * * | 1 | 2020-03-09 02:00:00 -0700 PDT |
+ | `0 2 * * *` | 1 | 2020-03-09 02:00:00 -0700 PDT |
| | 2 | 2020-03-10 02:00:00 -0700 PDT |
| | 3 | 2020-03-11 02:00:00 -0700 PDT |
- | 1 2 * * * | 1 | 2020-03-09 02:01:00 -0700 PDT |
+ | `1 2 * * *` | 1 | 2020-03-09 02:01:00 -0700 PDT |
| | 2 | 2020-03-10 02:01:00 -0700 PDT |
| | 3 | 2020-03-11 02:01:00 -0700 PDT |
@@ -89,13 +93,13 @@ For example, with timezone set at `America/Los_Angeles`, we have daylight saving
| cron | sequence | workflow execution time |
|------------|----------|-------------------------------|
- | 59 1 * * * | 1 | 2020-11-01 01:59:00 -0700 PDT |
+ | `59 1 * * *` | 1 | 2020-11-01 01:59:00 -0700 PDT |
| | 2 | 2020-11-01 01:59:00 -0800 PST |
| | 3 | 2020-11-02 01:59:00 -0800 PST |
- | 0 2 * * * | 1 | 2020-11-01 02:00:00 -0800 PST |
+ | `0 2 * * *` | 1 | 2020-11-01 02:00:00 -0800 PST |
| | 2 | 2020-11-02 02:00:00 -0800 PST |
| | 3 | 2020-11-03 02:00:00 -0800 PST |
- | 1 2 * * * | 1 | 2020-11-01 02:01:00 -0800 PST |
+ | `1 2 * * *` | 1 | 2020-11-01 02:01:00 -0800 PST |
| | 2 | 2020-11-02 02:01:00 -0800 PST |
| | 3 | 2020-11-03 02:01:00 -0800 PST |
@@ -105,7 +109,7 @@ For example, with timezone set at `America/Los_Angeles`, we have daylight saving
`CronWorkflow` can be created from the CLI by using basic commands:
-```sh
+```bash
$ argo cron create cron.yaml
Name: test-cron-wf
Namespace: argo
@@ -144,7 +148,7 @@ Active Workflows: test-cron-wf-rt4nf
Using `kubectl apply -f` and `kubectl get cwf`
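+
+For example (assuming the `cron.yaml` from above and the `argo` namespace):
+
+```bash
+kubectl apply -n argo -f cron.yaml
+kubectl get cwf -n argo
+```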
-## Backfilling Days
+## Back-Filling Days
See [cron backfill](cron-backfill.md).
diff --git a/docs/data-sourcing-and-transformation.md b/docs/data-sourcing-and-transformation.md
index 2634a025fcda..8eacded5f6b6 100644
--- a/docs/data-sourcing-and-transformation.md
+++ b/docs/data-sourcing-and-transformation.md
@@ -2,22 +2,22 @@
> v3.1 and after
-#### Development
-
We have intentionally made this feature available with only bare-bones functionality. Our hope is that we are able to build this feature with our community's feedback. If you have ideas and use cases for this feature, please open an [enhancement proposal](https://github.com/argoproj/argo-workflows/issues/new?assignees=&labels=enhancement&template=enhancement_proposal.md) on GitHub.
Additionally, please take a look at our current ideas at the bottom of this document.
## Introduction
+
Users often source and transform data as part of their workflows. The `data` template provides first-class support for these common operations.
`data` templates can best be understood by looking at a common data sourcing and transformation operation in `bash`:
```bash
-$ find -r . | grep ".pdf" | sed "s/foo/foo.ready/"
+find -r . | grep ".pdf" | sed "s/foo/foo.ready/"
```
Such operations consist of two main parts:
+
* A "source" of data: `find -r .`
* A series of "transformations" which transform the output of the source serially: `| grep ".pdf" | sed "s/foo/foo.ready/"`
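+
+A minimal sketch of how such an operation might look as a `data` template (the bucket, key, and expression are illustrative):
+
+```yaml
+- name: list-pdf-files
+  data:
+    source:
+      artifactPaths:            # the "source": list the artifact paths under this location
+        s3:
+          bucket: my-bucket
+          key: path/to/files
+    transformation:             # the "transformations", applied serially
+      - expression: "filter(data, {# endsWith \".pdf\"})"
+```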
diff --git a/docs/debug-pause.md b/docs/debug-pause.md
index 9dbf8dfd57c8..d62ef29c5261 100644
--- a/docs/debug-pause.md
+++ b/docs/debug-pause.md
@@ -1,4 +1,4 @@
-# Debug pause
+# Debug Pause
> v3.3 and after
@@ -6,8 +6,9 @@
The `debug pause` feature makes it possible to pause individual workflow steps for debugging before, after or both and then release the steps from the paused state. Currently this feature is only supported when using the [Emissary Executor](workflow-executors.md#emissary-emissary)
-In order to pause a container env variables are used:
-- `ARGO_DEBUG_PAUSE_AFTER` - to pause a step after execution
+In order to pause a container, env variables are used:
+
+- `ARGO_DEBUG_PAUSE_AFTER` - to pause a step after execution
- `ARGO_DEBUG_PAUSE_BEFORE` - to pause a step before execution
Example workflow:
@@ -32,9 +33,10 @@ In order to release a step from a pause state, marker files are used named `/var
## Example
-1) Create a workflow where the debug pause env in set, in this example `ARGO_DEBUG_PAUSE_AFTER` will be set and thus the step will be paused after execution of the user code.
+1) Create a workflow where the debug pause env is set; in this example `ARGO_DEBUG_PAUSE_AFTER` will be set and thus the step will be paused after execution of the user code.
pause-after.yaml
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -51,19 +53,19 @@ spec:
value: 'true'
```
-```bash
+```bash
argo submit -n argo --watch pause-after.yaml
```
-2) Create a shell in the container of interest of create a ephemeral container in the pod, in this example ephemeral containers are used.
+Create a shell in the container of interest or create an ephemeral container in the pod; in this example ephemeral containers are used.
-```
+```bash
kubectl debug -n argo -it POD_NAME --image=busybox --target=main --share-processes
```
-In order to have access to the persistence volume used by the workflow step, [`--share-processes`](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) will have to be used.
+In order to have access to the persistent volume used by the workflow step, [`--share-processes`](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) will have to be used.
-The ephemeral container can be used to perform debugging operations. When debugging has been completed, create the marker file to allow the workflow step to continue. When using process name space sharing container filesystems are visible to other containers in the pod through the /proc/$pid/root link.
+The ephemeral container can be used to perform debugging operations. When debugging has been completed, create the marker file to allow the workflow step to continue. When using process name space sharing, container file systems are visible to other containers in the pod through the `/proc/$pid/root` link.
```bash
touch /proc/1/root/run/argo/ctr/main/after
diff --git a/docs/default-workflow-specs.md b/docs/default-workflow-specs.md
index 6fbbc1c0e20b..e5a567143c18 100644
--- a/docs/default-workflow-specs.md
+++ b/docs/default-workflow-specs.md
@@ -1,7 +1,5 @@
# Default Workflow Spec
-
-
> v2.7 and after
## Introduction
@@ -12,7 +10,7 @@ If a Workflow has a value that also has a default value set in the config map, t
## Setting Default Workflow Values
Default Workflow values can be specified by adding them under the `workflowDefaults` key in the [`workflow-controller-configmap`](./workflow-controller-configmap.yaml).
-Values can be added as the would under the `Workflow.spec` tag.
+Values can be added as they would be under the `Workflow.spec` tag.
For example, to specify default values that would partially produce the following `Workflow`:
diff --git a/docs/disaster-recovery.md b/docs/disaster-recovery.md
index 72ae2a3c5a7f..7474a7395be0 100644
--- a/docs/disaster-recovery.md
+++ b/docs/disaster-recovery.md
@@ -4,15 +4,14 @@ We only store data in your Kubernetes cluster. You should consider backing this
Exporting example:
-```
-kubectl get wf,cwf,cwft,wftmpl -o yaml > backup.yaml
+```bash
+kubectl get wf,cwf,cwft,wftmpl -A -o yaml > backup.yaml
```
Importing example:
-```
-kubectl apply -f backup.yaml
-
+```bash
+kubectl apply -f backup.yaml
```
-You should also back-up any SQL persistence you use regularly with whatever tool is provided with it.
\ No newline at end of file
+You should also back-up any SQL persistence you use regularly with whatever tool is provided with it.
diff --git a/docs/doc-changes.md b/docs/doc-changes.md
new file mode 100644
index 000000000000..df6e369598c7
--- /dev/null
+++ b/docs/doc-changes.md
@@ -0,0 +1,38 @@
+# Documentation Changes
+
+Docs help our customers understand how to use workflows and fix their own problems.
+
+Doc changes are checked for spelling, broken links, and lint issues by CI. To check locally run `make docs`.
+
+* Explain when you would want to use a feature.
+* Provide working examples.
+* Use simple short sentences and avoid jargon.
+* Format code using back-ticks to avoid it being reported as a spelling error.
+* Avoid using title-case mid-sentence. E.g. instead of "the Workflow", write "the workflow".
+* Headings should be title-case. E.g. instead of "and", write "And".
+
+## Running Locally
+
+To test/run locally:
+
+```bash
+make docs-serve
+```
+
+## Tips
+
+Use a service like [Grammarly](https://www.grammarly.com) to check your grammar.
+
+Having your computer read text out loud is a way to catch problems, e.g.:
+
+* Word substitutions (i.e. the wrong word is used, but spelled correctly).
+* Sentences that do not read correctly will sound wrong.
+
+On Mac, to set-up:
+
+* Go to `System Preferences / Accessibility / Spoken Content`.
+* Choose a System Voice (I like `Siri Voice 1`).
+* Enable `Speak selection`.
+
+To hear text, select the text you want to hear, then press option+escape.
diff --git a/docs/empty-dir.md b/docs/empty-dir.md
index 748c748eca2f..95495ad6c9bd 100644
--- a/docs/empty-dir.md
+++ b/docs/empty-dir.md
@@ -2,12 +2,12 @@
While by default, the Docker and PNS [workflow executors](workflow-executors.md) can get output artifacts/parameters from the base layer (e.g. `/tmp`), neither the Kubelet nor the K8SAPI executors can. It is unlikely you can get output artifacts/parameters from the base layer if you run your workflow pods with a [security context](workflow-pod-security-context.md).
-You can work-around this constraint by mounting volumes onto your pod. The easiest way to do this is to use as `emptyDir` volume.
+You can work-around this constraint by mounting volumes onto your pod. The easiest way to do this is to use an `emptyDir` volume.
-!!! Note
+!!! Note
This is only needed for output artifacts/parameters. Input artifacts/parameters are automatically mounted to an empty-dir if needed
-This example shows how to mount an output volume:
+This example shows how to mount an output volume:
```yaml
apiVersion: argoproj.io/v1alpha1
diff --git a/docs/enhanced-depends-logic.md b/docs/enhanced-depends-logic.md
index 9f360761271c..dc0e6e1e5490 100644
--- a/docs/enhanced-depends-logic.md
+++ b/docs/enhanced-depends-logic.md
@@ -1,7 +1,5 @@
# Enhanced Depends Logic
-
-
> v2.9 and after
## Introduction
@@ -24,38 +22,39 @@ available task results is as follows:
| `.Failed` | Task Failed | Task exited with a non-0 exit code |
| `.Errored` | Task Errored | Task had an error other than a non-0 exit code |
| `.Skipped` | Task Skipped | Task was skipped |
+| `.Omitted` | Task Omitted | Task was omitted |
| `.Daemoned` | Task is Daemoned and is not Pending | |
For convenience, an omitted task result is equivalent to `(task.Succeeded || task.Skipped || task.Daemoned)`.
For example:
-```
+```yaml
depends: "task || task-2.Failed"
```
is equivalent to:
-```
+```yaml
depends: (task.Succeeded || task.Skipped || task.Daemoned) || task-2.Failed
```
Full boolean logic is also available. Operators include:
- * `&&`
- * `||`
- * `!`
+* `&&`
+* `||`
+* `!`
Example:
-```
+```yaml
depends: "(task-2.Succeeded || task-2.Skipped) && !task-3.Failed"
```
-In the case that you're depending on a task that uses withItems, you can depend on
-whether any of the item tasks are successful or all have failed using .AnySucceeded and .AllFailed, for example:
+In the case that you're depending on a task that uses `withItems`, you can depend on
+whether any of the item tasks are successful or all have failed using `.AnySucceeded` and `.AllFailed`, for example:
-```
+```yaml
depends: "task-1.AnySucceeded || task-2.AllFailed"
```
@@ -65,13 +64,13 @@ This feature is fully compatible with `dependencies` and conversion is easy.
To convert simply join your `dependencies` with `&&`:
-```
+```yaml
dependencies: ["A", "B", "C"]
```
is equivalent to:
-```
+```yaml
depends: "A && B && C"
```
diff --git a/docs/environment-variables.md b/docs/environment-variables.md
index 2ba3a62f864b..41a5dee8d617 100644
--- a/docs/environment-variables.md
+++ b/docs/environment-variables.md
@@ -1,63 +1,66 @@
# Environment Variables
-This document outlines the set of environment variables that can be used to customize the behaviours at different
-levels. These environment variables are typically added to test out experimental features and should not be needed by
-most users. Note that these environment variables may be removed at any time.
+This document outlines the set of environment variables that can be used to customize the behavior at different
+levels.
+
+⚠️ Environment variables are typically added to test out experimental features and should not be used by
+most users. Environment variables may be removed at any time.
## Controller
-| Name | Type | Default | Description |
-|------|------|---------|-------------|
-| `ARGO_AGENT_TASK_WORKERS` | `int` | `16` | The number of task workers for the agent pod. |
-| `ALL_POD_CHANGES_SIGNIFICANT` | `bool` | `false` | Whether to consider all pod changes as significant during pod reconciliation. |
-| `ALWAYS_OFFLOAD_NODE_STATUS` | `bool` | `false` | Whether to always offload the node status. |
-| `ARCHIVED_WORKFLOW_GC_PERIOD` | `time.Duration` | `24h` | The periodicity for GC of archived workflows. |
-| `ARGO_PPROF` | `bool` | `false` | Enable pprof endpoints |
-| `ARGO_PROGRESS_PATCH_TICK_DURATION` | `time.Duration` | `1m` | How often self reported progress is patched into the pod annotations which means how long it takes until the controller picks up the progress change. Set to 0 to disable self reporting progress. |
-| `ARGO_PROGRESS_FILE_TICK_DURATION` | `time.Duration` | `3s` | How often the progress file is read by the executor. Set to 0 to disable self reporting progress. |
-| `ARGO_REMOVE_PVC_PROTECTION_FINALIZER` | `bool` | `false` | Remove the `kubernetes.io/pvc-protection` finalizer from persistent volume claims (PVC) after marking PVCs created for the workflow for deletion, so deleted is not blocked until the pods are deleted. [#6629](https://github.com/argoproj/argo-workflows/issues/6629) |
-| `ARGO_TRACE` | `string` | `"1"` | Whether to enable tracing statements in Argo components. |
-| `ARGO_AGENT_PATCH_RATE` | `time.Duration` | `DEFAULT_REQUEUE_TIME` | Rate that the Argo Agent will patch the Workflow TaskSet. |
-| `ARGO_AGENT_CPU_LIMIT` | `resource.Quantity` | `100m` | CPU resource limit for the agent. |
-| `ARGO_AGENT_MEMORY_LIMIT` | `resource.Quantity` | `256m` | Memory resource limit for the agent. |
-| `BUBBLE_ENTRY_TEMPLATE_ERR` | `bool` | `true` | Whether to bubble up template errors to workflow. |
-| `CACHE_GC_PERIOD` | `time.Duration` | `0s` | How often to perform memoization cache GC, which is disabled by default and can be enabled by providing a non-zero duration. |
-| `CACHE_GC_AFTER_NOT_HIT_DURATION` | `time.Duration` | `30s` | When a memoization cache has not been hit after this duration, it will be deleted. |
-| `CRON_SYNC_PERIOD` | `time.Duration` | `10s` | How often to sync cron workflows. |
-| `DEFAULT_REQUEUE_TIME` | `time.Duration` | `10s` | The requeue time for the rate limiter of the workflow queue. |
-| `EXPRESSION_TEMPLATES` | `bool` | `true` | Escape hatch to disable expression templates. |
-| `GRPC_MESSAGE_SIZE` | `string` | Use different GRPC Max message size for Argo server deployment (supporting huge workflows). |
-| `GZIP_IMPLEMENTATION` | `string` | `"PGZip"` | The implementation of compression/decompression. Currently only "PGZip" and "GZip" are supported. |
-| `INFORMER_WRITE_BACK` | `bool` | `true` | Whether to write back to informer instead of catching up. |
-| `HEALTHZ_AGE` | `time.Duration` | `5m` | How old a un-reconciled workflow is to report unhealthy. |
-| `INDEX_WORKFLOW_SEMAPHORE_KEYS` | `bool` | `true` | Whether or not to index semaphores. |
-| `LEADER_ELECTION_IDENTITY` | `string` | Controller's `metadata.name` | The ID used for workflow controllers to elect a leader. |
-| `LEADER_ELECTION_DISABLE` | `bool` | `false` | Whether leader election should be disabled. |
-| `LEADER_ELECTION_LEASE_DURATION` | `time.Duration` | `15s` | The duration that non-leader candidates will wait to force acquire leadership. |
-| `LEADER_ELECTION_RENEW_DEADLINE` | `time.Duration` | `10s` | The duration that the acting master will retry refreshing leadership before giving up. |
-| `LEADER_ELECTION_RETRY_PERIOD` | `time.Duration` | `5s` | The duration that the leader election clients should wait between tries of actions. |
-| `MAX_OPERATION_TIME` | `time.Duration` | `30s` | The maximum time a workflow operation is allowed to run for before requeuing the workflow onto the work queue. |
-| `OFFLOAD_NODE_STATUS_TTL` | `time.Duration` | `5m` | The TTL to delete the offloaded node status. Currently only used for testing. |
-| `POD_NAMES` | `string` | `v2` | Whether to have pod names contain the template name (v2) or be the node id (v1). |
-| `RECENTLY_STARTED_POD_DURATION` | `time.Duration` | `10s` | The duration of a pod before the pod is considered to be recently started. |
-| `RETRY_BACKOFF_DURATION` | `time.Duration` | `10ms` | The retry backoff duration when retrying API calls. |
-| `RETRY_BACKOFF_FACTOR` | `float` | `2.0` | The retry backoff factor when retrying API calls. |
-| `RETRY_BACKOFF_STEPS` | `int` | `5` | The retry backoff steps when retrying API calls. |
-| `RETRY_HOST_NAME_LABEL_KEY` | `string` | `kubernetes.io/hostname` | The label key for host name used when retrying templates. |
-| `TRANSIENT_ERROR_PATTERN` | `string` | `""` | The regular expression that represents additional patterns for transient errors. |
-| `WF_DEL_PROPAGATION_POLICY` | `string` | `""` | The deletion propagation policy for workflows. |
-| `WORKFLOW_GC_PERIOD` | `time.Duration` | `5m` | The periodicity for GC of workflows. |
+| Name | Type | Default | Description |
+|----------------------------------------|---------------------|---------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `ARGO_AGENT_TASK_WORKERS` | `int` | `16` | The number of task workers for the agent pod. |
+| `ALL_POD_CHANGES_SIGNIFICANT` | `bool` | `false` | Whether to consider all pod changes as significant during pod reconciliation. |
+| `ALWAYS_OFFLOAD_NODE_STATUS` | `bool` | `false` | Whether to always offload the node status. |
+| `ARCHIVED_WORKFLOW_GC_PERIOD` | `time.Duration` | `24h` | The periodicity for GC of archived workflows. |
+| `ARGO_PPROF` | `bool` | `false` | Enable `pprof` endpoints |
+| `ARGO_PROGRESS_PATCH_TICK_DURATION` | `time.Duration` | `1m` | How often self reported progress is patched into the pod annotations which means how long it takes until the controller picks up the progress change. Set to 0 to disable self reporting progress. |
+| `ARGO_PROGRESS_FILE_TICK_DURATION` | `time.Duration` | `3s` | How often the progress file is read by the executor. Set to 0 to disable self reporting progress. |
+| `ARGO_REMOVE_PVC_PROTECTION_FINALIZER` | `bool` | `true` | Remove the `kubernetes.io/pvc-protection` finalizer from persistent volume claims (PVC) after marking PVCs created for the workflow for deletion, so deletion is not blocked until the pods are deleted. [#6629](https://github.com/argoproj/argo-workflows/issues/6629) |
+| `ARGO_TRACE` | `string` | `` | Whether to enable tracing statements in Argo components. |
+| `ARGO_AGENT_PATCH_RATE` | `time.Duration` | `DEFAULT_REQUEUE_TIME` | Rate that the Argo Agent will patch the workflow task-set. |
+| `ARGO_AGENT_CPU_LIMIT` | `resource.Quantity` | `100m` | CPU resource limit for the agent. |
+| `ARGO_AGENT_MEMORY_LIMIT` | `resource.Quantity` | `256m` | Memory resource limit for the agent. |
+| `BUBBLE_ENTRY_TEMPLATE_ERR` | `bool` | `true` | Whether to bubble up template errors to workflow. |
+| `CACHE_GC_PERIOD` | `time.Duration` | `0s` | How often to perform memoization cache GC, which is disabled by default and can be enabled by providing a non-zero duration. |
+| `CACHE_GC_AFTER_NOT_HIT_DURATION` | `time.Duration` | `30s` | When a memoization cache has not been hit after this duration, it will be deleted. |
+| `CRON_SYNC_PERIOD` | `time.Duration` | `10s` | How often to sync cron workflows. |
+| `DEFAULT_REQUEUE_TIME` | `time.Duration` | `10s` | The re-queue time for the rate limiter of the workflow queue. |
+| `EXPRESSION_TEMPLATES` | `bool` | `true` | Escape hatch to disable expression templates. |
+| `EVENT_AGGREGATION_WITH_ANNOTATIONS` | `bool` | `false` | Whether event annotations will be used when aggregating events. |
+| `GRPC_MESSAGE_SIZE` | `string` | | Use different GRPC Max message size for Argo server deployment (supporting huge workflows). |
+| `GZIP_IMPLEMENTATION` | `string` | `PGZip` | The implementation of compression/decompression. Currently only "`PGZip`" and "`GZip`" are supported. |
+| `INFORMER_WRITE_BACK` | `bool` | `true` | Whether to write back to informer instead of catching up. |
+| `HEALTHZ_AGE` | `time.Duration` | `5m` | How old an un-reconciled workflow must be before it is reported as unhealthy. |
+| `INDEX_WORKFLOW_SEMAPHORE_KEYS` | `bool` | `true` | Whether or not to index semaphores. |
+| `LEADER_ELECTION_IDENTITY` | `string` | Controller's `metadata.name` | The ID used for workflow controllers to elect a leader. |
+| `LEADER_ELECTION_DISABLE` | `bool` | `false` | Whether leader election should be disabled. |
+| `LEADER_ELECTION_LEASE_DURATION` | `time.Duration` | `15s` | The duration that non-leader candidates will wait to force acquire leadership. |
+| `LEADER_ELECTION_RENEW_DEADLINE` | `time.Duration` | `10s` | The duration that the acting master will retry refreshing leadership before giving up. |
+| `LEADER_ELECTION_RETRY_PERIOD` | `time.Duration` | `5s` | The duration that the leader election clients should wait between tries of actions. |
+| `MAX_OPERATION_TIME` | `time.Duration` | `30s` | The maximum time a workflow operation is allowed to run for before re-queuing the workflow onto the work queue. |
+| `OFFLOAD_NODE_STATUS_TTL` | `time.Duration` | `5m` | The TTL to delete the offloaded node status. Currently only used for testing. |
+| `POD_NAMES` | `string` | `v2` | Whether to have pod names contain the template name (v2) or be the node id (v1) - should be set the same for Argo Server. |
+| `RECENTLY_STARTED_POD_DURATION` | `time.Duration` | `10s` | The duration of a pod before the pod is considered to be recently started. |
+| `RETRY_BACKOFF_DURATION` | `time.Duration` | `10ms` | The retry back-off duration when retrying API calls. |
+| `RETRY_BACKOFF_FACTOR` | `float` | `2.0` | The retry back-off factor when retrying API calls. |
+| `RETRY_BACKOFF_STEPS` | `int` | `5` | The retry back-off steps when retrying API calls. |
+| `RETRY_HOST_NAME_LABEL_KEY` | `string` | `kubernetes.io/hostname` | The label key for host name used when retrying templates. |
+| `TRANSIENT_ERROR_PATTERN` | `string` | `""` | The regular expression that represents additional patterns for transient errors. |
+| `WF_DEL_PROPAGATION_POLICY` | `string` | `""` | The deletion propagation policy for workflows. |
+| `WORKFLOW_GC_PERIOD` | `time.Duration` | `5m` | The periodicity for GC of workflows. |
CLI parameters of the `argo-server` and `workflow-controller` can be specified as environment variables with the `ARGO_`
prefix. For example:
-```
+```bash
workflow-controller --managed-namespace=argo
```
Can be expressed as:
-```
+```bash
ARGO_MANAGED_NAMESPACE=argo workflow-controller
```
@@ -115,22 +118,18 @@ spec:
## Executor
-| Name | Type | Default | Description |
-|------|------|---------|-------------|
-| `ARGO_CONTAINER_RUNTIME_EXECUTOR` | `string` | `"docker"` | The name of the container runtime executor. |
-| `ARGO_KUBELET_PORT` | `int` | `10250` | The port to the Kubelet API. |
-| `ARGO_KUBELET_INSECURE` | `bool` | `false` | Whether to disable the TLS verification. |
-| `EXECUTOR_RETRY_BACKOFF_DURATION` | `time.Duration` | `1s` | The retry backoff duration when the workflow executor performs retries. |
-| `EXECUTOR_RETRY_BACKOFF_FACTOR` | `float` | `1.6` | The retry backoff factor when the workflow executor performs retries. |
-| `EXECUTOR_RETRY_BACKOFF_JITTER` | `float` | `0.5` | The retry backoff jitter when the workflow executor performs retries. |
-| `EXECUTOR_RETRY_BACKOFF_STEPS` | `int` | `5` | The retry backoff steps when the workflow executor performs retries. |
-| `PNS_PRIVILEGED` | `bool` | `false` | Whether to always set privileged on for PNS when PNS executor is used. |
-| `REMOVE_LOCAL_ART_PATH` | `bool` | `false` | Whether to remove local artifacts. |
-| `RESOURCE_STATE_CHECK_INTERVAL` | `time.Duration` | `5s` | The time interval between resource status checks against the specified success and failure conditions. |
-| `WAIT_CONTAINER_STATUS_CHECK_INTERVAL` | `time.Duration` | `5s` | The time interval for wait container to check whether the containers have completed. |
+| Name | Type | Default | Description |
+|----------------------------------------|-----------------|---------|--------------------------------------------------------------------------------------------------------|
+| `EXECUTOR_RETRY_BACKOFF_DURATION` | `time.Duration` | `1s` | The retry back-off duration when the workflow executor performs retries. |
+| `EXECUTOR_RETRY_BACKOFF_FACTOR` | `float` | `1.6` | The retry back-off factor when the workflow executor performs retries. |
+| `EXECUTOR_RETRY_BACKOFF_JITTER` | `float` | `0.5` | The retry back-off jitter when the workflow executor performs retries. |
+| `EXECUTOR_RETRY_BACKOFF_STEPS` | `int` | `5` | The retry back-off steps when the workflow executor performs retries. |
+| `REMOVE_LOCAL_ART_PATH` | `bool` | `false` | Whether to remove local artifacts. |
+| `RESOURCE_STATE_CHECK_INTERVAL` | `time.Duration` | `5s` | The time interval between resource status checks against the specified success and failure conditions. |
+| `WAIT_CONTAINER_STATUS_CHECK_INTERVAL` | `time.Duration` | `5s` | The time interval for wait container to check whether the containers have completed. |
You can set the environment variables for the executor by customizing the executor container's environment variables in your
-controller's configmap like the following:
+controller's config-map like the following:
```yaml
apiVersion: v1
@@ -147,8 +146,9 @@ data:
## Argo Server
-| Name | Type | Default | Description |
-|------|------|---------|-------------|
-| `FIRST_TIME_USER_MODAL` | `bool` | `true` | Show this modal. |
-| `FEEDBACK_MODAL` | `bool` | `true` | Show this modal. |
-| `NEW_VERSION_MODAL` | `bool` | `true` | Show this modal. |
+| Name | Type | Default | Description |
+|-------------------------|----------|---------|------------------------------------------------------------------------------------------------------------------------------|
+| `FIRST_TIME_USER_MODAL` | `bool` | `true` | Show this modal. |
+| `FEEDBACK_MODAL` | `bool` | `true` | Show this modal. |
+| `NEW_VERSION_MODAL` | `bool` | `true` | Show this modal. |
+| `POD_NAMES` | `string` | `v2` | Whether to have pod names contain the template name (v2) or be the node id (v1) - should be set the same for Controller |
diff --git a/docs/estimated-duration.md b/docs/estimated-duration.md
index 439b85e24a94..0af7d1028fbc 100644
--- a/docs/estimated-duration.md
+++ b/docs/estimated-duration.md
@@ -5,14 +5,14 @@
When you run a workflow, the controller will try to estimate its duration.
This is based on the most recently successful workflow submitted from the same workflow template, cluster workflow template or cron workflow.
-
-To get this data, the controller queries the Kubernetes API first (as this is faster) and then [workflow archive](workflow-archive.md) (if enabled).
+
+To get this data, the controller queries the Kubernetes API first (as this is faster) and then [workflow archive](workflow-archive.md) (if enabled).
If you've used tools like Jenkins, you'll know that estimates can be inaccurate:
* A pod spent a long amount of time pending scheduling.
-* The workflow is non-deterministic, e.g. it uses `when` to execute different paths.
+* The workflow is non-deterministic, e.g. it uses `when` to execute different paths.
* The workflow can vary in scale, e.g. sometimes it uses `withItems` and so sometimes runs 100 nodes, sometimes 1000.
* If the pod runtimes are unpredictable.
-* The workflow is parameterized, and different parameters affect its duration.
-
+* The workflow is parametrized, and different parameters affect its duration.
+
\ No newline at end of file
diff --git a/docs/events.md b/docs/events.md
index 288568853d72..edb0fd833d64 100644
--- a/docs/events.md
+++ b/docs/events.md
@@ -1,12 +1,10 @@
# Events
-
-
> v2.11 and after
## Overview
-To support external webhooks, we have this endpoint `/api/v1/events/{namespace}/{discriminator}`. Events can be sent to that can be any JSON data.
+To support external webhooks, we have the endpoint `/api/v1/events/{namespace}/{discriminator}`. Events sent to it can be any JSON data.
These events can submit *workflow templates* or *cluster workflow templates*.
@@ -14,7 +12,7 @@ You may also wish to read about [webhooks](webhooks.md).
## Authentication and Security
-Clients wanting to send events to the endpoint need an [access token](access-token.md).
+Clients wanting to send events to the endpoint need an [access token](access-token.md).
It is only possible to submit workflow templates your access token has access to: [example role](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start/base/webhooks/submit-workflow-template-role.yaml).
@@ -37,12 +35,12 @@ curl https://localhost:2746/api/v1/events/argo/my-discriminator \
The event endpoint will always return in under 10 seconds because the event will be queued and processed asynchronously. This means you will not be notified synchronously of failure. It will return a failure (503) if the event processing queue is full.
!!! Warning "Processing Order"
- Events may not always be processed in the order they are received.
+ Events may not always be processed in the order they are received.
## Workflow Template triggered by the event
Before the binding between an event and a workflow template, you must create the workflow template that you want to trigger.
-The following one takes in input the "message" parameter specified into the API call body, passed through the WorkflowEventBinding parameters section, and finally resolved here as the message of the whalesay image.
+The following one takes as input the "message" parameter specified in the API call body, passed through the `WorkflowEventBinding` parameters section, and finally resolved here as the message of the `whalesay` image.
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -66,8 +64,8 @@ spec:
## Submitting A Workflow From A Workflow Template
-A workflow template will be submitted (i.e. workflow created from it) and that can be created using parameters from the event itself.
-The following example will be triggered by an event with "message" in the payload. That message will be used as an argument for the created workflow. Note that the name of the metadata header "x-argo-e2e" is lowercase in the selector to match. Incoming header names are converted to lowercase.
+A workflow template will be submitted (i.e. a workflow will be created from it), and that workflow can be created using parameters from the event itself.
+The following example will be triggered by an event with "message" in the payload. That message will be used as an argument for the created workflow. Note that the name of the meta-data header "x-argo-e2e" is lowercase in the selector to match. Incoming header names are converted to lowercase.
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -87,8 +85,9 @@ spec:
valueFrom:
event: payload.message
```
-Please, notice that "workflowTemplateRef" refers to a template with the name "my-wf-tmple", this template has to be created before the triggering of the event.
-After that you have to apply the above explained WorkflowEventBinding (in this example this is called event-template.yml) to realize the binding between Workflow Template and event (you can use kubectl to do that):
+
+Please notice that `workflowTemplateRef` refers to a template with the name `my-wf-tmple`; this template has to be created before the event is triggered.
+After that, apply the `WorkflowEventBinding` explained above (in this example it is called `event-template.yml`) to create the binding between the workflow template and the event (you can use `kubectl` to do that):
```bash
kubectl apply -f event-template.yml
@@ -106,39 +105,42 @@ curl $ARGO_SERVER/api/v1/events/argo/my-discriminator \
```
!!! Warning "Malformed Expressions"
- If the expression is malformed, this is logged. It is not visible in logs or the UI.
+ If the expression is malformed, this is logged. It is not visible in logs or the UI.
+
+### Customizing the Workflow Meta-Data
-### Customizing the Workflow Metadata
You can customize the name of the submitted workflow as well as add annotations and
labels. This is done by adding a `metadata` object to the submit object.
Normally the name of the workflow created from an event is simply the name of the
-template with a timestamp appended. This can be customized by setting the name in the
+template with a time-stamp appended. This can be customized by setting the name in the
`metadata` object.
Annotations and labels are added in the same fashion.
All the values for the name, annotations and labels are treated as expressions (see
-below for details). The `metadata` object is the same `metadata` type as on all
+below for details). The `metadata` object is the same `metadata` type as on all
Kubernetes resources and as such is parsed in the same manner. It is best to enclose
the expression in single quotes to avoid any problems when submitting the event
binding to Kubernetes.
This is an example snippet of how to set the name, annotations and labels. This is
based on the workflow binding from above, and the first event.
+
```yaml
- submit:
- metadata:
- annotations:
- anAnnotation: 'event.payload.message'
- name: 'event.payload.message + "-world"'
- labels:
- someLabel: '"literal string"'
+submit:
+ metadata:
+ annotations:
+ anAnnotation: 'event.payload.message'
+ name: 'event.payload.message + "-world"'
+ labels:
+ someLabel: '"literal string"'
```
+
This will result in the workflow being named "hello-world" instead of
-`my-wf-tmple-`. There will be an extra label with the key "someLabel" and
+`my-wf-tmple-`. There will be an extra label with the key `someLabel` and
a value of "literal string". There will also be an extra annotation with the key
-"anAnnotation" and a value of "hello"
+`anAnnotation` and a value of "hello".
Be careful when setting the name. If the name expression evaluates to that of a currently
existing workflow, the new workflow will fail to submit.
@@ -161,7 +163,7 @@ Because the endpoint accepts any JSON data, it is the user's responsibility to w
The event environment contains:
* `payload` the event payload.
-* `metadata` event metadata, including HTTP headers.
+* `metadata` event meta-data, including HTTP headers.
* `discriminator` the discriminator from the URL.
### Payload
@@ -170,17 +172,17 @@ This is the JSON payload of the event.
Example:
-```
+```text
payload.repository.clone_url == "http://gihub.com/argoproj/argo"
```
-### MetaData
+### Meta-Data
-Metadata is data about the event, this includes **headers**:
+Meta-data is data about the event, this includes **headers**:
#### Headers
-HTTP header names are lowercase and only include those that have `x-` as their prefix. Their values are lists, not single values.
+HTTP header names are lowercase and only include those that have `x-` as their prefix. Their values are lists, not single values.
* Wrong: `metadata["X-Github-Event"] == "push"`
* Wrong: `metadata["x-github-event"] == "push"`
@@ -191,32 +193,32 @@ HTTP header names are lowercase and only include those that have `x-` as their p
Example:
-```
+```text
metadata["x-argo"] == ["yes"]
```
### Discriminator
-This is only for edge-cases where neither the payload, or metadata provide enough information to discriminate. Typically, it should be empty and ignored.
+This is only for edge-cases where neither the payload nor the meta-data provides enough information to discriminate. Typically, it should be empty and ignored.
Example:
-```
+```text
discriminator == "my-discriminator"
```
## High-Availability
!!! Warning "Run Minimum 2 Replicas"
- You MUST run a minimum of two Argo Server replicas if you do not want to lose events.
+ You MUST run a minimum of two Argo Server replicas if you do not want to lose events.
If you are processing large numbers of events, you may need to scale up the Argo Server to handle them. By default, a single Argo Server can be processing 64 events before the endpoint will start returning 503 errors.
Vertically you can:
-
+
* Increase the size of the event operation queue `--event-operation-queue-size` (good for temporary event bursts).
* Increase the number of workers `--event-worker-count` (good for sustained numbers of events).
Horizontally you can:
-
+
* Run more Argo Servers (good for sustained numbers of events AND high-availability).
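+
+For example, the vertical options can be passed as flags when starting the Argo Server (the values here are illustrative):
+
+```bash
+argo server --event-operation-queue-size 128 --event-worker-count 8
+```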
diff --git a/docs/executor_plugins.md b/docs/executor_plugins.md
index 7b3074509ae9..69acb5a729e4 100644
--- a/docs/executor_plugins.md
+++ b/docs/executor_plugins.md
@@ -48,7 +48,7 @@ We need the following:
A template executor plugin services HTTP POST requests on `/api/v1/template.execute`:
-```shell
+```bash
curl http://localhost:4355/api/v1/template.execute -d \
'{
"workflow": {
@@ -174,14 +174,14 @@ spec:
Build and install as follows:
-```shell
+```bash
argo executor-plugin build .
kubectl -n argo apply -f hello-executor-plugin-configmap.yaml
```
Check your controller logs:
-```
+```text
level=info msg="Executor plugin added" name=hello-controller-plugin
```
@@ -264,10 +264,10 @@ Transient errors are retried, all other errors are considered fatal.
Fatal errors will result in failed steps.
-### Requeue
+### Re-Queue
It might be the case that the plugin can't finish straight away. E.g. it starts a long running task. When that happens,
-you return "Pending" or "Running" a and a requeue time:
+you return "Pending" or "Running" a and a re-queue time:
```json
{
@@ -285,7 +285,7 @@ In this example, the task will be re-queued and `template.execute` will be calle
You can find the plugin's log in the agent pod's sidecar, e.g.:
-```shell
+```bash
kubectl -n argo logs ${agentPodName} -c hello-executor-plugin
```
@@ -293,7 +293,7 @@ kubectl -n argo logs ${agentPodName} -c hello-executor-plugin
Because plugins are just config maps, you can list them using `kubectl`:
-```shell
+```bash
kubectl get cm -l workflows.argoproj.io/configmap-type=ExecutorPlugin
```
diff --git a/docs/executor_swagger.md b/docs/executor_swagger.md
index c04999788199..a2a956793656 100644
--- a/docs/executor_swagger.md
+++ b/docs/executor_swagger.md
@@ -81,22 +81,21 @@ ownership management and SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type of the volume that you want to mount.
+| fsType | string| `string` | | | fsType is the filesystem type of the volume that you want to mount.
Tip: Ensure that the filesystem type is supported by the host operating system.
Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
TODO: how do we prevent errors in the filesystem from compromising the machine
+optional | |
-| partition | int32 (formatted integer)| `int32` | | | The partition in the volume that you want to mount.
+| partition | int32 (formatted integer)| `int32` | | | partition is the partition in the volume that you want to mount.
If omitted, the default is to mount by volume name.
Examples: For volume /dev/sda1, you specify the partition as "1".
Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+optional | |
-| readOnly | boolean| `bool` | | | Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
-If omitted, the default is "false".
+| readOnly | boolean| `bool` | | | readOnly value true will force the readOnly setting in VolumeMounts.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+optional | |
-| volumeID | string| `string` | | | Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+| volumeID | string| `string` | | | volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | |
@@ -201,7 +200,10 @@ It will marshall back to string - marshalling is not symmetric. | |
|------|------|---------|:--------:| ------- |-------------|---------|
| archive | [ArchiveStrategy](#archive-strategy)| `ArchiveStrategy` | | | | |
| archiveLogs | boolean| `bool` | | | ArchiveLogs indicates if the container logs should be archived | |
+| artifactGC | [ArtifactGC](#artifact-g-c)| `ArtifactGC` | | | | |
| artifactory | [ArtifactoryArtifact](#artifactory-artifact)| `ArtifactoryArtifact` | | | | |
+| azure | [AzureArtifact](#azure-artifact)| `AzureArtifact` | | | | |
+| deleted | boolean| `bool` | | | Has this been deleted? | |
| from | string| `string` | | | From allows an artifact to reference an artifact from a previous step | |
| fromExpression | string| `string` | | | FromExpression, if defined, is evaluated to specify the value for the artifact | |
| gcs | [GCSArtifact](#g-c-s-artifact)| `GCSArtifact` | | | | |
@@ -223,6 +225,37 @@ set when loading input artifacts. | |
+### ArtifactGC
+
+
+> ArtifactGC describes how to delete artifacts from completed Workflows
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| podMetadata | [Metadata](#metadata)| `Metadata` | | | | |
+| serviceAccountName | string| `string` | | | ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion | |
+| strategy | [ArtifactGCStrategy](#artifact-g-c-strategy)| `ArtifactGCStrategy` | | | | |
+
+
+
+### ArtifactGCStrategy
+
+
+
+
+| Name | Type | Go type | Default | Description | Example |
+|------|------|---------| ------- |-------------|---------|
+| ArtifactGCStrategy | string| string | | | |
+
+
+
### ArtifactLocation
@@ -241,6 +274,7 @@ of a single workflow step, which the executor will use as a default location to
|------|------|---------|:--------:| ------- |-------------|---------|
| archiveLogs | boolean| `bool` | | | ArchiveLogs indicates if the container logs should be archived | |
| artifactory | [ArtifactoryArtifact](#artifactory-artifact)| `ArtifactoryArtifact` | | | | |
+| azure | [AzureArtifact](#azure-artifact)| `AzureArtifact` | | | | |
| gcs | [GCSArtifact](#g-c-s-artifact)| `GCSArtifact` | | | | |
| git | [GitArtifact](#git-artifact)| `GitArtifact` | | | | |
| hdfs | [HDFSArtifact](#h-d-f-s-artifact)| `HDFSArtifact` | | | | |
@@ -267,7 +301,10 @@ of a single workflow step, which the executor will use as a default location to
|------|------|---------|:--------:| ------- |-------------|---------|
| archive | [ArchiveStrategy](#archive-strategy)| `ArchiveStrategy` | | | | |
| archiveLogs | boolean| `bool` | | | ArchiveLogs indicates if the container logs should be archived | |
+| artifactGC | [ArtifactGC](#artifact-g-c)| `ArtifactGC` | | | | |
| artifactory | [ArtifactoryArtifact](#artifactory-artifact)| `ArtifactoryArtifact` | | | | |
+| azure | [AzureArtifact](#azure-artifact)| `AzureArtifact` | | | | |
+| deleted | boolean| `bool` | | | Has this been deleted? | |
| from | string| `string` | | | From allows an artifact to reference an artifact from a previous step | |
| fromExpression | string| `string` | | | FromExpression, if defined, is evaluated to specify the value for the artifact | |
| gcs | [GCSArtifact](#g-c-s-artifact)| `GCSArtifact` | | | | |
@@ -316,25 +353,53 @@ set when loading input artifacts. | |
[][Artifact](#artifact)
+### AzureArtifact
+
+
+> AzureArtifact is the location of an Azure Storage artifact
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| accountKeySecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
+| blob | string| `string` | | | Blob is the blob name (i.e., path) in the container where the artifact resides | |
+| container | string| `string` | | | Container is the container where resources will be stored | |
+| endpoint | string| `string` | | | Endpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net" | |
+| useSDKCreds | boolean| `bool` | | | UseSDKCreds tells the driver to figure out credentials based on sdk defaults. | |
+
+
+
### AzureDataDiskCachingMode
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| AzureDataDiskCachingMode | string| string | | | |
+| AzureDataDiskCachingMode | string| string | | +enum | |
### AzureDataDiskKind
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| AzureDataDiskKind | string| string | | | |
+| AzureDataDiskKind | string| string | | +enum | |
@@ -350,14 +415,14 @@ set when loading input artifacts. | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| cachingMode | [AzureDataDiskCachingMode](#azure-data-disk-caching-mode)| `AzureDataDiskCachingMode` | | | | |
-| diskName | string| `string` | | | The Name of the data disk in the blob storage | |
-| diskURI | string| `string` | | | The URI the data disk in the blob storage | |
-| fsType | string| `string` | | | Filesystem type to mount.
+| diskName | string| `string` | | | diskName is the Name of the data disk in the blob storage | |
+| diskURI | string| `string` | | | diskURI is the URI of data disk in the blob storage | |
+| fsType | string| `string` | | | fsType is Filesystem type to mount.
Must be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+optional | |
| kind | [AzureDataDiskKind](#azure-data-disk-kind)| `AzureDataDiskKind` | | | | |
-| readOnly | boolean| `bool` | | | Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly Defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
+optional | |
@@ -374,11 +439,11 @@ the ReadOnly setting in VolumeMounts.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| readOnly | boolean| `bool` | | | Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
+optional | |
-| secretName | string| `string` | | | the name of secret that contains Azure Storage Account Name and Key | |
-| shareName | string| `string` | | | Share Name | |
+| secretName | string| `string` | | | secretName is the name of secret that contains Azure Storage Account Name and Key | |
+| shareName | string| `string` | | | shareName is the azure share Name | |
@@ -402,6 +467,25 @@ the ReadOnly setting in VolumeMounts.
+### BasicAuth
+
+
+> BasicAuth describes the secret selectors required for basic authentication
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| passwordSecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
+| usernameSecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
+
+
+
### CSIVolumeSource
@@ -416,17 +500,17 @@ the ReadOnly setting in VolumeMounts.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| driver | string| `string` | | | Driver is the name of the CSI driver that handles this volume.
+| driver | string| `string` | | | driver is the name of the CSI driver that handles this volume.
Consult with your admin for the correct name as registered in the cluster. | |
-| fsType | string| `string` | | | Filesystem type to mount. Ex. "ext4", "xfs", "ntfs".
+| fsType | string| `string` | | | fsType to mount. Ex. "ext4", "xfs", "ntfs".
If not provided, the empty value is passed to the associated CSI driver
which will determine the default filesystem to apply.
+optional | |
| nodePublishSecretRef | [LocalObjectReference](#local-object-reference)| `LocalObjectReference` | | | | |
-| readOnly | boolean| `bool` | | | Specifies a read-only configuration for the volume.
+| readOnly | boolean| `bool` | | | readOnly specifies a read-only configuration for the volume.
Defaults to false (read/write).
+optional | |
-| volumeAttributes | map of string| `map[string]string` | | | VolumeAttributes stores driver-specific properties that are passed to the CSI
+| volumeAttributes | map of string| `map[string]string` | | | volumeAttributes stores driver-specific properties that are passed to the CSI
driver. Consult your driver's documentation for supported values.
+optional | |
@@ -497,19 +581,19 @@ Cephfs volumes do not support ownership management or SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| monitors | []string| `[]string` | | | Required: Monitors is a collection of Ceph monitors
+| monitors | []string| `[]string` | | | monitors is Required: Monitors is a collection of Ceph monitors
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | |
-| path | string| `string` | | | Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+| path | string| `string` | | | path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+optional | |
-| readOnly | boolean| `bool` | | | Optional: Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+optional | |
-| secretFile | string| `string` | | | Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+| secretFile | string| `string` | | | secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+optional | |
| secretRef | [LocalObjectReference](#local-object-reference)| `LocalObjectReference` | | | | |
-| user | string| `string` | | | Optional: User is the rados user name, default is admin
+| user | string| `string` | | | user is optional: User is the rados user name, default is admin
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+optional | |
@@ -531,21 +615,40 @@ Cinder volumes support ownership management and SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type to mount.
+| fsType | string| `string` | | | fsType is the filesystem type to mount.
Must be a filesystem type supported by the host operating system.
Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+optional | |
-| readOnly | boolean| `bool` | | | Optional: Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+optional | |
| secretRef | [LocalObjectReference](#local-object-reference)| `LocalObjectReference` | | | | |
-| volumeID | string| `string` | | | volume id used to identify the volume in cinder.
+| volumeID | string| `string` | | | volumeID used to identify the volume in cinder.
More info: https://examples.k8s.io/mysql-cinder-pd/README.md | |
+### ClientCertAuth
+
+
+> ClientCertAuth holds necessary information for client authentication via certificates
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| clientCertSecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
+| clientKeySecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
+
+
+
### ConfigMapEnvSource
@@ -573,10 +676,13 @@ TODO: Add other useful fields. apiVersion, kind, uid?
### ConfigMapKeySelector
+> +structType=atomic
+
+
**Properties**
| Name | Type | Go type | Required | Default | Description | Example |
@@ -609,7 +715,7 @@ mode.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| items | [][KeyToPath](#key-to-path)| `[]*KeyToPath` | | | If unspecified, each key-value pair in the Data field of the referenced
+| items | [][KeyToPath](#key-to-path)| `[]*KeyToPath` | | | items if unspecified, each key-value pair in the Data field of the referenced
ConfigMap will be projected into the volume as a file whose name is the
key and content is the value. If specified, the listed keys will be
projected into the specified paths, and unlisted keys will not be
@@ -621,7 +727,7 @@ relative and may not contain the '..' path or start with '..'.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
+optional | |
-| optional | boolean| `bool` | | | Specify whether the ConfigMap or its keys must be defined
+| optional | boolean| `bool` | | | optional specify whether the ConfigMap or its keys must be defined
+optional | |
@@ -643,7 +749,7 @@ ConfigMap volumes support ownership management and SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| defaultMode | int32 (formatted integer)| `int32` | | | Optional: mode bits used to set permissions on created files by default.
+| defaultMode | int32 (formatted integer)| `int32` | | | defaultMode is optional: mode bits used to set permissions on created files by default.
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
Defaults to 0644.
@@ -651,7 +757,7 @@ Directories within the path are not affected by this setting.
This might be in conflict with other options that affect the file
mode, like fsGroup, and the result can be other mode bits set.
+optional | |
-| items | [][KeyToPath](#key-to-path)| `[]*KeyToPath` | | | If unspecified, each key-value pair in the Data field of the referenced
+| items | [][KeyToPath](#key-to-path)| `[]*KeyToPath` | | | items if unspecified, each key-value pair in the Data field of the referenced
ConfigMap will be projected into the volume as a file whose name is the
key and content is the value. If specified, the listed keys will be
projected into the specified paths, and unlisted keys will not be
@@ -663,7 +769,7 @@ relative and may not contain the '..' path or start with '..'.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
+optional | |
-| optional | boolean| `bool` | | | Specify whether the ConfigMap or its keys must be defined
+| optional | boolean| `bool` | | | optional specify whether the ConfigMap or its keys must be defined
+optional | |
@@ -680,21 +786,21 @@ TODO: Add other useful fields. apiVersion, kind, uid?
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| args | []string| `[]string` | | | Arguments to the entrypoint.
-The docker image's CMD is used if this is not provided.
+The container image's CMD is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
-can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
-regardless of whether the variable exists or not.
-Cannot be updated.
+cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+optional | |
| command | []string| `[]string` | | | Entrypoint array. Not executed within a shell.
-The docker image's ENTRYPOINT is used if this is not provided.
+The container image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
-can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
-regardless of whether the variable exists or not.
-Cannot be updated.
+cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+optional | |
| env | [][EnvVar](#env-var)| `[]*EnvVar` | | | List of environment variables to set in the container.
@@ -709,7 +815,7 @@ sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
+optional | |
-| image | string| `string` | | | Docker image name.
+| image | string| `string` | | | Container image name.
More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management to default or override
container images in workload controllers like Deployments and StatefulSets.
@@ -790,21 +896,21 @@ Cannot be updated.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| args | []string| `[]string` | | | Arguments to the entrypoint.
-The docker image's CMD is used if this is not provided.
+The container image's CMD is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
-can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
-regardless of whether the variable exists or not.
-Cannot be updated.
+cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+optional | |
| command | []string| `[]string` | | | Entrypoint array. Not executed within a shell.
-The docker image's ENTRYPOINT is used if this is not provided.
+The container image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
-can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
-regardless of whether the variable exists or not.
-Cannot be updated.
+cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+optional | |
| dependencies | []string| `[]string` | | | | |
@@ -820,7 +926,7 @@ sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
+optional | |
-| image | string| `string` | | | Docker image name.
+| image | string| `string` | | | Container image name.
More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management to default or override
container images in workload controllers like Deployments and StatefulSets.
@@ -917,6 +1023,23 @@ referred to by services.
+### ContainerSetRetryStrategy
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| duration | string| `string` | | | Duration is the time between each retry, example values are "300ms", "1s" or "5m".
+Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | |
+| retries | [IntOrString](#int-or-string)| `IntOrString` | | | | |
+
+
+
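+A hedged Go sketch of a retry strategy (not part of the generated reference); the field names and the `wfv1`/`intstr` import paths are assumptions based on the table above:
+
+```go
+package main
+
+import (
+	"k8s.io/apimachinery/pkg/util/intstr"
+
+	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+)
+
+// Retry each container in the set up to 3 times, waiting 10s between attempts.
+var retries = intstr.FromInt(3)
+
+var containerSetRetry = wfv1.ContainerSetRetryStrategy{
+	Duration: "10s", // any Go-style duration: "300ms", "1s", "5m", ...
+	Retries:  &retries,
+}
+```
+
+This value would then be referenced from the `retryStrategy` property of the ContainerSetTemplate described in the next section.
+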
### ContainerSetTemplate
@@ -929,6 +1052,7 @@ referred to by services.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| containers | [][ContainerNode](#container-node)| `[]*ContainerNode` | | | | |
+| retryStrategy | [ContainerSetRetryStrategy](#container-set-retry-strategy)| `ContainerSetRetryStrategy` | | | | |
| volumeMounts | [][VolumeMount](#volume-mount)| `[]*VolumeMount` | | | | |
@@ -1227,11 +1351,12 @@ can be used as map keys in json.
|------|------|---------|:--------:| ------- |-------------|---------|
| name | string| `string` | | | Name of the environment variable. Must be a C_IDENTIFIER. | |
| value | string| `string` | | | Variable references $(VAR_NAME) are expanded
-using the previous defined environment variables in the container and
+using the previously defined environment variables in the container and
any service environment variables. If a variable cannot be resolved,
-the reference in the input string will be unchanged. The $(VAR_NAME)
-syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
-references will never be expanded, regardless of whether the variable
+the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+Escaped references will never be expanded, regardless of whether the variable
exists or not.
Defaults to "".
+optional | |
@@ -1354,19 +1479,19 @@ Fibre Channel volumes support ownership management and SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type to mount.
+| fsType | string| `string` | | | fsType is the filesystem type to mount.
Must be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
TODO: how do we prevent errors in the filesystem from compromising the machine
+optional | |
-| lun | int32 (formatted integer)| `int32` | | | Optional: FC target lun number
+| lun | int32 (formatted integer)| `int32` | | | lun is Optional: FC target lun number
+optional | |
-| readOnly | boolean| `bool` | | | Optional: Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
+optional | |
-| targetWWNs | []string| `[]string` | | | Optional: FC target worldwide names (WWNs)
+| targetWWNs | []string| `[]string` | | | targetWWNs is Optional: FC target worldwide names (WWNs)
+optional | |
-| wwids | []string| `[]string` | | | Optional: FC volume world wide identifiers (wwids)
+| wwids | []string| `[]string` | | | wwids Optional: FC volume world wide identifiers (wwids)
Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+optional | |
@@ -1406,14 +1531,14 @@ provisioned/attached using an exec based plugin.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| driver | string| `string` | | | Driver is the name of the driver to use for this volume. | |
-| fsType | string| `string` | | | Filesystem type to mount.
+| driver | string| `string` | | | driver is the name of the driver to use for this volume. | |
+| fsType | string| `string` | | | fsType is the filesystem type to mount.
Must be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+optional | |
-| options | map of string| `map[string]string` | | | Optional: Extra command options if any.
+| options | map of string| `map[string]string` | | | options is Optional: this field holds extra command options if any.
+optional | |
-| readOnly | boolean| `bool` | | | Optional: Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly is Optional: defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
+optional | |
| secretRef | [LocalObjectReference](#local-object-reference)| `LocalObjectReference` | | | | |
@@ -1435,10 +1560,10 @@ Flocker volumes do not support ownership management or SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| datasetName | string| `string` | | | Name of the dataset stored as metadata -> name on the dataset for Flocker
+| datasetName | string| `string` | | | datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
should be considered as deprecated
+optional | |
-| datasetUUID | string| `string` | | | UUID of the dataset. This is unique identifier of a Flocker dataset
+| datasetUUID | string| `string` | | | datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset
+optional | |
@@ -1460,21 +1585,21 @@ PDs support ownership management and SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type of the volume that you want to mount.
+| fsType | string| `string` | | | fsType is filesystem type of the volume that you want to mount.
Tip: Ensure that the filesystem type is supported by the host operating system.
Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
TODO: how do we prevent errors in the filesystem from compromising the machine
+optional | |
-| partition | int32 (formatted integer)| `int32` | | | The partition in the volume that you want to mount.
+| partition | int32 (formatted integer)| `int32` | | | partition is the partition in the volume that you want to mount.
If omitted, the default is to mount by volume name.
Examples: For volume /dev/sda1, you specify the partition as "1".
Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+optional | |
-| pdName | string| `string` | | | Unique name of the PD resource in GCE. Used to identify the disk in GCE.
+| pdName | string| `string` | | | pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | |
-| readOnly | boolean| `bool` | | | ReadOnly here will force the ReadOnly setting in VolumeMounts.
+| readOnly | boolean| `bool` | | | readOnly here will force the ReadOnly setting in VolumeMounts.
Defaults to false.
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+optional | |
@@ -1501,6 +1626,27 @@ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+### GRPCAction
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| port | int32 (formatted integer)| `int32` | | | Port number of the gRPC service. Number must be in the range 1 to 65535. | |
+| service | string| `string` | | | Service is the name of the service to place in the gRPC HealthCheckRequest
+(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+If this is not specified, the default behavior is defined by gRPC.
++optional
++default="" | |
+
+
+
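+A minimal Go sketch of a probe using this action (not part of the generated reference), assuming a `k8s.io/api` version in which `Probe` embeds `ProbeHandler` (v1.23+):
+
+```go
+package main
+
+import corev1 "k8s.io/api/core/v1"
+
+// Readiness probe using the gRPC health-checking protocol.
+// An empty Service selects the server's overall health, matching the +default="" above.
+var service = ""
+
+var readiness = corev1.Probe{
+	ProbeHandler: corev1.ProbeHandler{
+		GRPC: &corev1.GRPCAction{
+			Port:    5000, // must be in the range 1 to 65535
+			Service: &service,
+		},
+	},
+	InitialDelaySeconds: 5,
+	PeriodSeconds:       10,
+}
+```
+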
### Gauge
@@ -1534,6 +1680,7 @@ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
+| branch | string| `string` | | | Branch is the branch to fetch when `SingleBranch` is enabled | |
| depth | uint64 (formatted integer)| `uint64` | | | Depth specifies clones/fetches should be shallow and include the given
number of commits from the branch tip | |
| disableSubmodules | boolean| `bool` | | | DisableSubmodules disables submodules during git clone | |
@@ -1542,6 +1689,7 @@ number of commits from the branch tip | |
| passwordSecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
| repo | string| `string` | | | Repo is the git repository | |
| revision | string| `string` | | | Revision is the git commit, tag, branch to checkout | |
+| singleBranch | boolean| `bool` | | | SingleBranch enables single branch clone, using the `branch` parameter | |
| sshPrivateKeySecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
| usernameSecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
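+
+A hedged Go sketch combining the new `branch`/`singleBranch` fields with a shallow clone (not part of the generated reference); the capitalized field names are assumptions derived from the property names above:
+
+```go
+package main
+
+import wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+
+// Shallow, single-branch clone: only `main`, only the 10 most recent commits.
+var depth = uint64(10)
+
+var gitInput = wfv1.GitArtifact{
+	Repo:         "https://github.com/argoproj/argo-workflows.git",
+	Branch:       "main",
+	SingleBranch: true, // Branch is only consulted when SingleBranch is enabled
+	Depth:        &depth,
+}
+```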
@@ -1563,13 +1711,13 @@ into the Pod's container.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| directory | string| `string` | | | Target directory name.
+| directory | string| `string` | | | directory is the target directory name.
Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
git repository. Otherwise, if specified, the volume will contain the git repository in
the subdirectory with the given name.
+optional | |
-| repository | string| `string` | | | Repository URL | |
-| revision | string| `string` | | | Commit hash for the specified revision.
+| repository | string| `string` | | | repository is the URL | |
+| revision | string| `string` | | | revision is the commit hash for the specified revision.
+optional | |
@@ -1588,11 +1736,11 @@ the subdirectory with the given name.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| endpoints | string| `string` | | | EndpointsName is the endpoint name that details Glusterfs topology.
+| endpoints | string| `string` | | | endpoints is the endpoint name that details Glusterfs topology.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | |
-| path | string| `string` | | | Path is the Glusterfs volume path.
+| path | string| `string` | | | path is the Glusterfs volume path.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | |
-| readOnly | boolean| `bool` | | | ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+| readOnly | boolean| `bool` | | | readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
Defaults to false.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+optional | |
@@ -1642,8 +1790,11 @@ It must be set if keytab is used. | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| body | string| `string` | | | Body is content of the HTTP Request | |
-| headers | [][HTTPHeader](#http-header)| `[]*HTTPHeader` | | | Headers are an optional list of headers to send with HTTP requests | |
+| bodyFrom | [HTTPBodySource](#http-body-source)| `HTTPBodySource` | | | | |
+| headers | [HTTPHeaders](#http-headers)| `HTTPHeaders` | | | | |
+| insecureSkipVerify | boolean| `bool` | | | InsecureSkipVerify is a bool which, if set to true, will skip TLS verification for the HTTP client | |
| method | string| `string` | | | Method is HTTP methods for HTTP Request | |
+| successCondition | string| `string` | | | SuccessCondition is an expression which, if evaluated to true, is considered successful | |
| timeoutSeconds | int64 (formatted integer)| `int64` | | | TimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds | |
| url | string| `string` | | | URL of the HTTP Request | |
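+
+A hedged Go sketch of an HTTP template request (not part of the generated reference). The field names follow the table above; the `response.statusCode` variable in the success condition and the `Name`/`Value` fields of `HTTPHeader` are assumptions based on the upstream HTTP-template examples:
+
+```go
+package main
+
+import wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+
+// HTTP template step that only succeeds on a 2xx response within 10 seconds.
+var timeout = int64(10)
+
+var httpStep = wfv1.HTTP{
+	Method:           "GET",
+	URL:              "https://example.com/healthz",
+	Headers:          wfv1.HTTPHeaders{{Name: "Accept", Value: "application/json"}},
+	TimeoutSeconds:   &timeout,
+	SuccessCondition: "response.statusCode >= 200 && response.statusCode < 300",
+}
+```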
@@ -1652,7 +1803,7 @@ It must be set if keytab is used. | |
### HTTPArtifact
-> HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container
+> HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container
@@ -1663,11 +1814,44 @@ It must be set if keytab is used. | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
+| auth | [HTTPAuth](#http-auth)| `HTTPAuth` | | | | |
| headers | [][Header](#header)| `[]*Header` | | | Headers are an optional list of headers to send with HTTP requests for artifacts | |
| url | string| `string` | | | URL of the artifact | |
+### HTTPAuth
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| basicAuth | [BasicAuth](#basic-auth)| `BasicAuth` | | | | |
+| clientCert | [ClientCertAuth](#client-cert-auth)| `ClientCertAuth` | | | | |
+| oauth2 | [OAuth2Auth](#o-auth2-auth)| `OAuth2Auth` | | | | |
+
+
+
+### HTTPBodySource
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| bytes | []uint8 (formatted integer)| `[]uint8` | | | | |
+
+
+
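+A hedged Go sketch of an HTTP input artifact using the new `auth` property (not part of the generated reference). Writing `Auth` as a pointer and the `UsernameSecret`/`PasswordSecret` field names of `BasicAuth` are assumptions; only the property names in the tables above are confirmed:
+
+```go
+package main
+
+import (
+	corev1 "k8s.io/api/core/v1"
+
+	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+)
+
+// Input artifact fetched over HTTPS, with basic-auth credentials read from a Secret.
+var httpIn = wfv1.HTTPArtifact{
+	URL: "https://example.com/releases/data.tar.gz",
+	Auth: &wfv1.HTTPAuth{
+		BasicAuth: wfv1.BasicAuth{
+			UsernameSecret: &corev1.SecretKeySelector{
+				LocalObjectReference: corev1.LocalObjectReference{Name: "http-creds"},
+				Key:                  "username",
+			},
+			PasswordSecret: &corev1.SecretKeySelector{
+				LocalObjectReference: corev1.LocalObjectReference{Name: "http-creds"},
+				Key:                  "password",
+			},
+		},
+	},
+}
+```
+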
### HTTPGetAction
@@ -1723,26 +1907,12 @@ It must be set if keytab is used. | |
-### Handler
+### HTTPHeaders
-> Handler defines a specific action that should be taken
-TODO: pass structured data to these actions, and document that data here.
-
-
-
-
-**Properties**
-
-| Name | Type | Go type | Required | Default | Description | Example |
-|------|------|---------|:--------:| ------- |-------------|---------|
-| exec | [ExecAction](#exec-action)| `ExecAction` | | | | |
-| httpGet | [HTTPGetAction](#http-get-action)| `HTTPGetAction` | | | | |
-| tcpSocket | [TCPSocketAction](#tcp-socket-action)| `TCPSocketAction` | | | | |
-
-
+[][HTTPHeader](#http-header)
### Header
@@ -1805,11 +1975,14 @@ pod's hosts file.
### HostPathType
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| HostPathType | string| string | | | |
+| HostPathType | string| string | | +enum | |
@@ -1827,7 +2000,7 @@ pod's hosts file.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| path | string| `string` | | | Path of the directory on the host.
+| path | string| `string` | | | path of the directory on the host.
If the path is a symlink, it will follow the link to the real path.
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | |
| type | [HostPathType](#host-path-type)| `HostPathType` | | | | |
@@ -1849,33 +2022,33 @@ ISCSI volumes support ownership management and SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| chapAuthDiscovery | boolean| `bool` | | | whether support iSCSI Discovery CHAP authentication
+| chapAuthDiscovery | boolean| `bool` | | | chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
+optional | |
-| chapAuthSession | boolean| `bool` | | | whether support iSCSI Session CHAP authentication
+| chapAuthSession | boolean| `bool` | | | chapAuthSession defines whether support iSCSI Session CHAP authentication
+optional | |
-| fsType | string| `string` | | | Filesystem type of the volume that you want to mount.
+| fsType | string| `string` | | | fsType is the filesystem type of the volume that you want to mount.
Tip: Ensure that the filesystem type is supported by the host operating system.
Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
TODO: how do we prevent errors in the filesystem from compromising the machine
+optional | |
-| initiatorName | string| `string` | | | Custom iSCSI Initiator Name.
+| initiatorName | string| `string` | | | initiatorName is the custom iSCSI Initiator Name.
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
: will be created for the connection.
+optional | |
-| iqn | string| `string` | | | Target iSCSI Qualified Name. | |
-| iscsiInterface | string| `string` | | | iSCSI Interface Name that uses an iSCSI transport.
+| iqn | string| `string` | | | iqn is the target iSCSI Qualified Name. | |
+| iscsiInterface | string| `string` | | | iscsiInterface is the interface Name that uses an iSCSI transport.
Defaults to 'default' (tcp).
+optional | |
-| lun | int32 (formatted integer)| `int32` | | | iSCSI Target Lun number. | |
-| portals | []string| `[]string` | | | iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+| lun | int32 (formatted integer)| `int32` | | | lun represents iSCSI Target Lun number. | |
+| portals | []string| `[]string` | | | portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
is other than default (typically TCP ports 860 and 3260).
+optional | |
-| readOnly | boolean| `bool` | | | ReadOnly here will force the ReadOnly setting in VolumeMounts.
+| readOnly | boolean| `bool` | | | readOnly here will force the ReadOnly setting in VolumeMounts.
Defaults to false.
+optional | |
| secretRef | [LocalObjectReference](#local-object-reference)| `LocalObjectReference` | | | | |
-| targetPortal | string| `string` | | | iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+| targetPortal | string| `string` | | | targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
is other than default (typically TCP ports 860 and 3260). | |
@@ -1945,15 +2118,15 @@ is other than default (typically TCP ports 860 and 3260). | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| key | string| `string` | | | The key to project. | |
-| mode | int32 (formatted integer)| `int32` | | | Optional: mode bits used to set permissions on this file.
+| key | string| `string` | | | key is the key to project. | |
+| mode | int32 (formatted integer)| `int32` | | | mode is Optional: mode bits used to set permissions on this file.
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
If not specified, the volume defaultMode will be used.
This might be in conflict with other options that affect the file
mode, like fsGroup, and the result can be other mode bits set.
+optional | |
-| path | string| `string` | | | The relative path of the file to map the key to.
+| path | string| `string` | | | path is the relative path of the file to map the key to.
May not be an absolute path.
May not contain the path element '..'.
May not start with the string '..'. | |
@@ -2040,8 +2213,29 @@ until the action is complete, unless the container process fails, in which case
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| postStart | [Handler](#handler)| `Handler` | | | | |
-| preStop | [Handler](#handler)| `Handler` | | | | |
+| postStart | [LifecycleHandler](#lifecycle-handler)| `LifecycleHandler` | | | | |
+| preStop | [LifecycleHandler](#lifecycle-handler)| `LifecycleHandler` | | | | |
+
+
+
+### LifecycleHandler
+
+
+> LifecycleHandler defines a specific action that should be taken in a lifecycle
+hook. One and only one of the fields, except TCPSocket must be specified.
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| exec | [ExecAction](#exec-action)| `ExecAction` | | | | |
+| httpGet | [HTTPGetAction](#http-get-action)| `HTTPGetAction` | | | | |
+| tcpSocket | [TCPSocketAction](#tcp-socket-action)| `TCPSocketAction` | | | | |
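+
+A minimal Go sketch of a lifecycle hook using this handler (not part of the generated reference), assuming a `k8s.io/api` version in which `Handler` has been renamed to `LifecycleHandler` (v1.23+); the drain script path is a placeholder:
+
+```go
+package main
+
+import corev1 "k8s.io/api/core/v1"
+
+// Run a drain script before the container receives SIGTERM.
+var lifecycle = corev1.Lifecycle{
+	PreStop: &corev1.LifecycleHandler{
+		Exec: &corev1.ExecAction{
+			Command: []string{"/bin/sh", "-c", "/app/drain-connections.sh"},
+		},
+	},
+}
+```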
@@ -2057,7 +2251,10 @@ until the action is complete, unless the container process fails, in which case
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| arguments | [Arguments](#arguments)| `Arguments` | | | | |
-| template | string| `string` | | | | |
+| expression | string| `string` | | | Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not
+be retried and the retry strategy will be ignored | |
+| template | string| `string` | | | Template is the name of the template to be executed by the hook | |
+| templateRef | [TemplateRef](#template-ref)| `TemplateRef` | | | | |
@@ -2073,6 +2270,7 @@ until the action is complete, unless the container process fails, in which case
> LocalObjectReference contains enough information to let you locate the
referenced object inside the same namespace.
++structType=atomic
@@ -2114,6 +2312,13 @@ There is currently only one possible value: "FieldsV1" | |
| fieldsV1 | [FieldsV1](#fields-v1)| `FieldsV1` | | | | |
| manager | string| `string` | | | Manager is an identifier of the workflow managing these fields. | |
| operation | [ManagedFieldsOperationType](#managed-fields-operation-type)| `ManagedFieldsOperationType` | | | | |
+| subresource | string| `string` | | | Subresource is the name of the subresource used to update that object, or
+empty string if the object was updated through the main resource. The
+value of this field is used to distinguish between managers, even if they
+share the same name. For example, a status update will be distinct from a
+regular update using the same manager name.
+Note that the APIVersion field is not related to the Subresource field and
+it always corresponds to the version of the main resource. | |
| time | [Time](#time)| `Time` | | | | |
@@ -2129,6 +2334,21 @@ There is currently only one possible value: "FieldsV1" | |
+### ManifestFrom
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| artifact | [Artifact](#artifact)| `Artifact` | | | | |
+
+
+
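+A hedged Go sketch of a resource template whose manifest comes from an artifact (not part of the generated reference); the field names and pointer shapes are assumptions based on the ResourceTemplate and Artifact tables in this document:
+
+```go
+package main
+
+import wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+
+// Resource template that applies a manifest supplied as an input artifact,
+// instead of embedding it in the Manifest string.
+var resourceStep = wfv1.ResourceTemplate{
+	Action: "apply",
+	ManifestFrom: &wfv1.ManifestFrom{
+		Artifact: &wfv1.Artifact{Name: "deployment-manifest"},
+	},
+}
+```
+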
### Memoize
@@ -2209,11 +2429,14 @@ than the MaxAge, it will be ignored. | |
### MountPropagationMode
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| MountPropagationMode | string| string | | | |
+| MountPropagationMode | string| string | | +enum | |
@@ -2249,14 +2472,13 @@ than the MaxAge, it will be ignored. | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| path | string| `string` | | | Path that is exported by the NFS server.
+| path | string| `string` | | | path that is exported by the NFS server.
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | |
-| readOnly | boolean| `bool` | | | ReadOnly here will force
-the NFS export to be mounted with read-only permissions.
+| readOnly | boolean| `bool` | | | readOnly here will force the NFS export to be mounted with read-only permissions.
Defaults to false.
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+optional | |
-| server | string| `string` | | | Server is the hostname or IP address of the NFS server.
+| server | string| `string` | | | server is the hostname or IP address of the NFS server.
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | |
@@ -2311,6 +2533,7 @@ node(s) with the highest sum are the most preferred.
| message | string| `string` | | | | |
| outputs | [Outputs](#outputs)| `Outputs` | | | | |
| phase | [NodePhase](#node-phase)| `NodePhase` | | | | |
+| progress | [Progress](#progress)| `Progress` | | | | |
@@ -2320,6 +2543,7 @@ node(s) with the highest sum are the most preferred.
> A node selector represents the union of the results of one or more label queries
over a set of nodes; that is, it represents the OR of the selectors represented
by the node selector terms.
++structType=atomic
@@ -2339,6 +2563,7 @@ by the node selector terms.
> A node selector operator is the set of operators that can be used in
a node selector requirement.
++enum
@@ -2346,7 +2571,8 @@ a node selector requirement.
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
| NodeSelectorOperator | string| string | | A node selector operator is the set of operators that can be used in
-a node selector requirement. | |
+a node selector requirement.
++enum | |
@@ -2382,6 +2608,7 @@ This array is replaced during a strategic merge patch.
> A null or empty node selector term matches no objects. The requirements of
them are ANDed.
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
++structType=atomic
@@ -2411,6 +2638,47 @@ save/load the directory appropriately.
[interface{}](#interface)
+### OAuth2Auth
+
+
+> OAuth2Auth holds all information for client authentication via OAuth2 tokens
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| clientIDSecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
+| clientSecretSecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
+| endpointParams | [][OAuth2EndpointParam](#o-auth2-endpoint-param)| `[]*OAuth2EndpointParam` | | | | |
+| scopes | []string| `[]string` | | | | |
+| tokenURLSecret | [SecretKeySelector](#secret-key-selector)| `SecretKeySelector` | | | | |
+
+
+
+### OAuth2EndpointParam
+
+
+> EndpointParam is for requesting optional fields that should be sent in the oauth request
+
+
+
+
+
+
+**Properties**
+
+| Name | Type | Go type | Required | Default | Description | Example |
+|------|------|---------|:--------:| ------- |-------------|---------|
+| key | string| `string` | | | Name is the header name | |
+| value | string| `string` | | | Value is the literal value to use for the header | |
+
+
+
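+A hedged Go sketch of OAuth2 client-credentials configuration (not part of the generated reference). The capitalized field names follow the tables above; the `secretKey` helper is hypothetical, and the pointer element type of `EndpointParams` mirrors the Go type column rather than a confirmed source definition:
+
+```go
+package main
+
+import (
+	corev1 "k8s.io/api/core/v1"
+
+	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
+)
+
+// secretKey is a small helper (hypothetical, for brevity) selecting one key of a Secret.
+func secretKey(name, key string) *corev1.SecretKeySelector {
+	return &corev1.SecretKeySelector{
+		LocalObjectReference: corev1.LocalObjectReference{Name: name},
+		Key:                  key,
+	}
+}
+
+// Client-credentials OAuth2: client ID, client secret and token URL all live in one Secret.
+var oauth = wfv1.OAuth2Auth{
+	ClientIDSecret:     secretKey("oauth-creds", "client-id"),
+	ClientSecretSecret: secretKey("oauth-creds", "client-secret"),
+	TokenURLSecret:     secretKey("oauth-creds", "token-url"),
+	Scopes:             []string{"read"},
+	EndpointParams: []*wfv1.OAuth2EndpointParam{
+		{Key: "audience", Value: "example-api"}, // sent as extra token-endpoint parameters
+	},
+}
+```
+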
### OSSArtifact
@@ -2455,21 +2723,14 @@ save/load the directory appropriately.
-### Object
+### ObjectFieldSelector
-> +kubebuilder:validation:Type=object
+> +structType=atomic
-[interface{}](#interface)
-
-### ObjectFieldSelector
-
-
-
-
**Properties**
@@ -2526,6 +2787,7 @@ save/load the directory appropriately.
> OwnerReference contains enough information to let you identify an owning
object. An owning object must be in the same namespace as the dependent, or
be cluster-scoped, so there is no namespace field.
++structType=atomic
@@ -2540,6 +2802,8 @@ be cluster-scoped, so there is no namespace field.
| blockOwnerDeletion | boolean| `bool` | | | If true, AND if the owner has the "foregroundDeletion" finalizer, then
the owner cannot be deleted from the key-value store until this
reference is removed.
+See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion
+for how the garbage collector interacts with this field and enforces the foreground deletion.
Defaults to false.
To set this field, a user needs "delete" permission of the owner,
otherwise 422 (Unprocessable Entity) will be returned.
@@ -2579,6 +2843,7 @@ More info: http://kubernetes.io/docs/user-guide/identifiers#names | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| default | [AnyString](#any-string)| `AnyString` | | | | |
+| description | [AnyString](#any-string)| `AnyString` | | | | |
| enum | [][AnyString](#any-string)| `[]AnyString` | | | Enum holds a list of string values to choose from, for the actual value of the parameter | |
| globalName | string| `string` | | | GlobalName exports an output parameter to the global scope, making it available as
'{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters | |
@@ -2591,11 +2856,14 @@ More info: http://kubernetes.io/docs/user-guide/identifiers#names | |
### PersistentVolumeAccessMode
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| PersistentVolumeAccessMode | string| string | | | |
+| PersistentVolumeAccessMode | string| string | | +enum | |
@@ -2614,17 +2882,18 @@ and allows a Source for provider-specific attributes
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| accessModes | [][PersistentVolumeAccessMode](#persistent-volume-access-mode)| `[]PersistentVolumeAccessMode` | | | AccessModes contains the desired access modes the volume should have.
+| accessModes | [][PersistentVolumeAccessMode](#persistent-volume-access-mode)| `[]PersistentVolumeAccessMode` | | | accessModes contains the desired access modes the volume should have.
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+optional | |
| dataSource | [TypedLocalObjectReference](#typed-local-object-reference)| `TypedLocalObjectReference` | | | | |
+| dataSourceRef | [TypedLocalObjectReference](#typed-local-object-reference)| `TypedLocalObjectReference` | | | | |
| resources | [ResourceRequirements](#resource-requirements)| `ResourceRequirements` | | | | |
| selector | [LabelSelector](#label-selector)| `LabelSelector` | | | | |
-| storageClassName | string| `string` | | | Name of the StorageClass required by the claim.
+| storageClassName | string| `string` | | | storageClassName is the name of the StorageClass required by the claim.
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+optional | |
| volumeMode | [PersistentVolumeMode](#persistent-volume-mode)| `PersistentVolumeMode` | | | | |
-| volumeName | string| `string` | | | VolumeName is the binding reference to the PersistentVolume backing this claim.
+| volumeName | string| `string` | | | volumeName is the binding reference to the PersistentVolume backing this claim.
+optional | |
@@ -2649,9 +2918,12 @@ set by external tools to store and retrieve arbitrary metadata. They are not
queryable and should be preserved when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations
+optional | |
-| clusterName | string| `string` | | | The name of the cluster which the object belongs to.
-This is used to distinguish resources with same name and namespace in different clusters.
-This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
+| clusterName | string| `string` | | | Deprecated: ClusterName is a legacy field that was always cleared by
+the system and never used; it will be removed completely in 1.25.
+
+The name in the go struct is changed to help clients detect
+accidental use.
+
+optional | |
| creationTimestamp | [Time](#time)| `Time` | | | | |
| deletionGracePeriodSeconds | int64 (formatted integer)| `int64` | | | Number of seconds allowed for this object to gracefully terminate before
@@ -2683,10 +2955,7 @@ The provided value has the same validation rules as the Name field,
and may be truncated by the length of the suffix required to make the value
unique on the server.
-If this field is specified and the generated name exists, the server will
-NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
-ServerTimeout indicating a unique name could not be found in the time allotted, and the client
-should retry (optionally after the time indicated in the Retry-After header).
+If this field is specified and the generated name exists, the server will return a 409.
Applied only if Name is not specified.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
@@ -2742,13 +3011,7 @@ Read-only.
Value must be treated as opaque by clients and .
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+optional | |
-| selfLink | string| `string` | | | SelfLink is a URL representing this object.
-Populated by the system.
-Read-only.
-
-DEPRECATED
-Kubernetes will stop propagating this field in 1.20 release and the field is planned
-to be removed in 1.21 release.
+| selfLink | string| `string` | | | Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.
+optional | |
| spec | [PersistentVolumeClaimSpec](#persistent-volume-claim-spec)| `PersistentVolumeClaimSpec` | | | | |
| uid | [UID](#uid)| `UID` | | | | |
@@ -2771,9 +3034,9 @@ type of volume that is owned by someone else (the system).
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| claimName | string| `string` | | | ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+| claimName | string| `string` | | | claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims | |
-| readOnly | boolean| `bool` | | | Will force the ReadOnly setting in VolumeMounts.
+| readOnly | boolean| `bool` | | | readOnly Will force the ReadOnly setting in VolumeMounts.
Default false.
+optional | |
@@ -2782,11 +3045,14 @@ Default false.
### PersistentVolumeMode
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| PersistentVolumeMode | string| string | | | |
+| PersistentVolumeMode | string| string | | +enum | |
@@ -2801,12 +3067,22 @@ Default false.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type to mount.
+| fsType | string| `string` | | | fsType is the filesystem type to mount.
Must be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. | |
-| pdID | string| `string` | | | ID that identifies Photon Controller persistent disk | |
+| pdID | string| `string` | | | pdID is the ID that identifies Photon Controller persistent disk | |
+
+
+
+### Plugin
+
+
+> Plugin is an Object with exactly one key
+
+
+[interface{}](#interface)
### PodAffinity
@@ -2864,7 +3140,7 @@ a pod of the set of pods is running
| namespaces | []string| `[]string` | | | namespaces specifies a static list of namespace names that the term applies to.
The term is applied to the union of the namespaces listed in this field
and the ones selected by namespaceSelector.
-null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+optional | |
| topologyKey | string| `string` | | | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where co-located is defined as running on a node
@@ -2911,6 +3187,7 @@ podAffinityTerm are intersected, i.e. all terms must be satisfied.
> PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
when volume is mounted.
++enum
@@ -2918,7 +3195,8 @@ when volume is mounted.
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
| PodFSGroupChangePolicy | string| string | | PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
-when volume is mounted. | |
+when volume is mounted.
++enum | |
@@ -2946,6 +3224,7 @@ to be owned by the pod:
3. The permission bits are OR'd with rw-rw----
If unset, the Kubelet will not modify the ownership and permissions of any volume.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| fsGroupChangePolicy | [PodFSGroupChangePolicy](#pod-f-s-group-change-policy)| `PodFSGroupChangePolicy` | | | | |
| runAsGroup | int64 (formatted integer)| `int64` | | | The GID to run the entrypoint of the container process.
@@ -2953,6 +3232,7 @@ Uses runtime default if unset.
May also be set in SecurityContext. If set in both SecurityContext and
PodSecurityContext, the value specified in SecurityContext takes precedence
for that container.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| runAsNonRoot | boolean| `bool` | | | Indicates that the container must run as a non-root user.
If true, the Kubelet will validate the image at runtime to ensure that it
@@ -2966,15 +3246,18 @@ Defaults to user specified in image metadata if unspecified.
May also be set in SecurityContext. If set in both SecurityContext and
PodSecurityContext, the value specified in SecurityContext takes precedence
for that container.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| seLinuxOptions | [SELinuxOptions](#s-e-linux-options)| `SELinuxOptions` | | | | |
| seccompProfile | [SeccompProfile](#seccomp-profile)| `SeccompProfile` | | | | |
| supplementalGroups | []int64 (formatted integer)| `[]int64` | | | A list of groups applied to the first process run in each container, in addition
to the container's primary GID. If unspecified, no groups will be added to
any container.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| sysctls | [][Sysctl](#sysctl)| `[]*Sysctl` | | | Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
sysctls (by the container runtime) might fail to launch.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| windowsOptions | [WindowsSecurityContextOptions](#windows-security-context-options)| `WindowsSecurityContextOptions` | | | | |
@@ -2991,13 +3274,13 @@ sysctls (by the container runtime) might fail to launch.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | FSType represents the filesystem type to mount
+| fsType | string| `string` | | | fSType represents the filesystem type to mount
Must be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. | |
-| readOnly | boolean| `bool` | | | Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
+optional | |
-| volumeID | string| `string` | | | VolumeID uniquely identifies a Portworx volume | |
+| volumeID | string| `string` | | | volumeID uniquely identifies a Portworx volume | |
@@ -3040,6 +3323,7 @@ alive or ready to receive traffic.
| failureThreshold | int32 (formatted integer)| `int32` | | | Minimum consecutive failures for the probe to be considered failed after having succeeded.
Defaults to 3. Minimum value is 1.
+optional | |
+| grpc | [GRPCAction](#g-rpc-action)| `GRPCAction` | | | | |
| httpGet | [HTTPGetAction](#http-get-action)| `HTTPGetAction` | | | | |
| initialDelaySeconds | int32 (formatted integer)| `int32` | | | Number of seconds after the container has started before liveness probes are initiated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
@@ -3059,7 +3343,8 @@ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Othe
value overrides the value provided by the pod spec.
Value must be non-negative integer. The value zero indicates stop immediately via
the kill signal (no opportunity to shut down).
-This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.
+This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+optional | |
| timeoutSeconds | int32 (formatted integer)| `int32` | | | Number of seconds after which the probe times out.
Defaults to 1 second. Minimum value is 1.
@@ -3071,11 +3356,25 @@ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#cont
### ProcMountType
+> +enum
+
+
+
+
+| Name | Type | Go type | Default | Description | Example |
+|------|------|---------| ------- |-------------|---------|
+| ProcMountType | string| string | | +enum | |
+
+
+
+### Progress
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| ProcMountType | string| string | | | |
+| Progress | string| string | | | |
@@ -3093,14 +3392,14 @@ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#cont
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| defaultMode | int32 (formatted integer)| `int32` | | | Mode bits used to set permissions on created files by default.
+| defaultMode | int32 (formatted integer)| `int32` | | | defaultMode are the mode bits used to set permissions on created files by default.
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
Directories within the path are not affected by this setting.
This might be in conflict with other options that affect the file
mode, like fsGroup, and the result can be other mode bits set.
+optional | |
-| sources | [][VolumeProjection](#volume-projection)| `[]*VolumeProjection` | | | list of volume projections
+| sources | [][VolumeProjection](#volume-projection)| `[]*VolumeProjection` | | | sources is the list of volume projections
+optional | |
@@ -3132,11 +3431,14 @@ mode, like fsGroup, and the result can be other mode bits set.
### Protocol
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| Protocol | string| string | | | |
+| Protocol | string| string | | +enum | |
@@ -3144,13 +3446,15 @@ mode, like fsGroup, and the result can be other mode bits set.
> PullPolicy describes a policy for if/when to pull a container image
++enum
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| PullPolicy | string| string | | PullPolicy describes a policy for if/when to pull a container image | |
+| PullPolicy | string| string | | PullPolicy describes a policy for if/when to pull a container image
++enum | |
@@ -3231,22 +3535,22 @@ cause implementors to also use a fixed point implementation.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| group | string| `string` | | | Group to map volume access to
+| group | string| `string` | | | group to map volume access to
Default is no group
+optional | |
-| readOnly | boolean| `bool` | | | ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
+| readOnly | boolean| `bool` | | | readOnly here will force the Quobyte volume to be mounted with read-only permissions.
Defaults to false.
+optional | |
-| registry | string| `string` | | | Registry represents a single or multiple Quobyte Registry services
+| registry | string| `string` | | | registry represents a single or multiple Quobyte Registry services
specified as a string as host:port pair (multiple entries are separated with commas)
which acts as the central registry for volumes | |
-| tenant | string| `string` | | | Tenant owning the given Quobyte volume in the Backend
+| tenant | string| `string` | | | tenant owning the given Quobyte volume in the Backend
Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+optional | |
-| user | string| `string` | | | User to map volume access to
+| user | string| `string` | | | user to map volume access to
Defaults to serviceaccount user
+optional | |
-| volume | string| `string` | | | Volume is a string that references an already created Quobyte volume by name. | |
+| volume | string| `string` | | | volume is a string that references an already created Quobyte volume by name. | |
@@ -3264,30 +3568,30 @@ Defaults to serivceaccount user
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type of the volume that you want to mount.
+| fsType | string| `string` | | | fsType is the filesystem type of the volume that you want to mount.
Tip: Ensure that the filesystem type is supported by the host operating system.
Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
TODO: how do we prevent errors in the filesystem from compromising the machine
+optional | |
-| image | string| `string` | | | The rados image name.
+| image | string| `string` | | | image is the rados image name.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | |
-| keyring | string| `string` | | | Keyring is the path to key ring for RBDUser.
+| keyring | string| `string` | | | keyring is the path to key ring for RBDUser.
Default is /etc/ceph/keyring.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+optional | |
-| monitors | []string| `[]string` | | | A collection of Ceph monitors.
+| monitors | []string| `[]string` | | | monitors is a collection of Ceph monitors.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | |
-| pool | string| `string` | | | The rados pool name.
+| pool | string| `string` | | | pool is the rados pool name.
Default is rbd.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+optional | |
-| readOnly | boolean| `bool` | | | ReadOnly here will force the ReadOnly setting in VolumeMounts.
+| readOnly | boolean| `bool` | | | readOnly here will force the ReadOnly setting in VolumeMounts.
Defaults to false.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+optional | |
| secretRef | [LocalObjectReference](#local-object-reference)| `LocalObjectReference` | | | | |
-| user | string| `string` | | | The rados user name.
+| user | string| `string` | | | user is the rados user name.
Default is admin.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+optional | |
@@ -3316,6 +3620,7 @@ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
> ResourceFieldSelector represents container resources (cpu, memory) and their output format
++structType=atomic
@@ -3380,6 +3685,7 @@ flags: [
"--validate=false" # disable resource validation
] | |
| manifest | string| `string` | | | Manifest contains the kubernetes manifest | |
+| manifestFrom | [ManifestFrom](#manifest-from)| `ManifestFrom` | | | | |
| mergeStrategy | string| `string` | | | MergeStrategy is the strategy used to merge a patch. It defaults to "strategic"
Must be one of: strategic, merge, json | |
| setOwnerReference | boolean| `bool` | | | SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource. | |
@@ -3535,27 +3841,27 @@ be retried and the retry strategy will be ignored | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type to mount.
+| fsType | string| `string` | | | fsType is the filesystem type to mount.
Must be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs".
Default is "xfs".
+optional | |
-| gateway | string| `string` | | | The host address of the ScaleIO API Gateway. | |
-| protectionDomain | string| `string` | | | The name of the ScaleIO Protection Domain for the configured storage.
+| gateway | string| `string` | | | gateway is the host address of the ScaleIO API Gateway. | |
+| protectionDomain | string| `string` | | | protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
+optional | |
-| readOnly | boolean| `bool` | | | Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly Defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
+optional | |
| secretRef | [LocalObjectReference](#local-object-reference)| `LocalObjectReference` | | | | |
-| sslEnabled | boolean| `bool` | | | Flag to enable/disable SSL communication with Gateway, default false
+| sslEnabled | boolean| `bool` | | | sslEnabled Flag enable/disable SSL communication with Gateway, default false
+optional | |
-| storageMode | string| `string` | | | Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+| storageMode | string| `string` | | | storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
Default is ThinProvisioned.
+optional | |
-| storagePool | string| `string` | | | The ScaleIO Storage Pool associated with the protection domain.
+| storagePool | string| `string` | | | storagePool is the ScaleIO Storage Pool associated with the protection domain.
+optional | |
-| system | string| `string` | | | The name of the storage system as configured in ScaleIO. | |
-| volumeName | string| `string` | | | The name of a volume already created in the ScaleIO system
+| system | string| `string` | | | system is the name of the storage system as configured in ScaleIO. | |
+| volumeName | string| `string` | | | volumeName is the name of a volume already created in the ScaleIO system
that is associated with this volume source. | |
@@ -3575,21 +3881,21 @@ that is associated with this volume source. | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| args | []string| `[]string` | | | Arguments to the entrypoint.
-The docker image's CMD is used if this is not provided.
+The container image's CMD is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
-can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
-regardless of whether the variable exists or not.
-Cannot be updated.
+cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+optional | |
| command | []string| `[]string` | | | Entrypoint array. Not executed within a shell.
-The docker image's ENTRYPOINT is used if this is not provided.
+The container image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
-can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
-regardless of whether the variable exists or not.
-Cannot be updated.
+cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+optional | |
| env | [][EnvVar](#env-var)| `[]*EnvVar` | | | List of environment variables to set in the container.
@@ -3604,7 +3910,7 @@ sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
+optional | |
-| image | string| `string` | | | Docker image name.
+| image | string| `string` | | | Container image name.
More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management to default or override
container images in workload controllers like Deployments and StatefulSets.
@@ -3701,11 +4007,14 @@ Must only be set if type is "Localhost".
### SeccompProfileType
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| SeccompProfileType | string| string | | | |
+| SeccompProfileType | string| string | | +enum | |
@@ -3736,10 +4045,13 @@ TODO: Add other useful fields. apiVersion, kind, uid?
### SecretKeySelector
+> +structType=atomic
+
+
**Properties**
| Name | Type | Go type | Required | Default | Description | Example |
@@ -3771,7 +4083,7 @@ mode.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| items | [][KeyToPath](#key-to-path)| `[]*KeyToPath` | | | If unspecified, each key-value pair in the Data field of the referenced
+| items | [][KeyToPath](#key-to-path)| `[]*KeyToPath` | | | items if unspecified, each key-value pair in the Data field of the referenced
Secret will be projected into the volume as a file whose name is the
key and content is the value. If specified, the listed keys will be
projected into the specified paths, and unlisted keys will not be
@@ -3783,7 +4095,7 @@ relative and may not contain the '..' path or start with '..'.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
+optional | |
-| optional | boolean| `bool` | | | Specify whether the Secret or its key must be defined
+| optional | boolean| `bool` | | | optional field specifies whether the Secret or its key must be defined
+optional | |
@@ -3804,7 +4116,7 @@ Secret volumes support ownership management and SELinux relabeling.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| defaultMode | int32 (formatted integer)| `int32` | | | Optional: mode bits used to set permissions on created files by default.
+| defaultMode | int32 (formatted integer)| `int32` | | | defaultMode is Optional: mode bits used to set permissions on created files by default.
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
YAML accepts both octal and decimal values, JSON requires decimal values
for mode bits. Defaults to 0644.
@@ -3812,7 +4124,7 @@ Directories within the path are not affected by this setting.
This might be in conflict with other options that affect the file
mode, like fsGroup, and the result can be other mode bits set.
+optional | |
-| items | [][KeyToPath](#key-to-path)| `[]*KeyToPath` | | | If unspecified, each key-value pair in the Data field of the referenced
+| items | [][KeyToPath](#key-to-path)| `[]*KeyToPath` | | | items If unspecified, each key-value pair in the Data field of the referenced
Secret will be projected into the volume as a file whose name is the
key and content is the value. If specified, the listed keys will be
projected into the specified paths, and unlisted keys will not be
@@ -3820,9 +4132,9 @@ present. If a key is specified which is not present in the Secret,
the volume setup will error unless it is marked optional. Paths must be
relative and may not contain the '..' path or start with '..'.
+optional | |
-| optional | boolean| `bool` | | | Specify whether the Secret or its keys must be defined
+| optional | boolean| `bool` | | | optional field specifies whether the Secret or its keys must be defined
+optional | |
-| secretName | string| `string` | | | Name of the secret in the pod's namespace to use.
+| secretName | string| `string` | | | secretName is the name of the secret in the pod's namespace to use.
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+optional | |
@@ -3849,20 +4161,24 @@ the no_new_privs flag will be set on the container process.
AllowPrivilegeEscalation is true always when the container is:
1) run as Privileged
2) has CAP_SYS_ADMIN
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| capabilities | [Capabilities](#capabilities)| `Capabilities` | | | | |
| privileged | boolean| `bool` | | | Run container in privileged mode.
Processes in privileged containers are essentially equivalent to root on the host.
Defaults to false.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| procMount | [ProcMountType](#proc-mount-type)| `ProcMountType` | | | | |
| readOnlyRootFilesystem | boolean| `bool` | | | Whether this container has a read-only root filesystem.
Default is false.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| runAsGroup | int64 (formatted integer)| `int64` | | | The GID to run the entrypoint of the container process.
Uses runtime default if unset.
May also be set in PodSecurityContext. If set in both SecurityContext and
PodSecurityContext, the value specified in SecurityContext takes precedence.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| runAsNonRoot | boolean| `bool` | | | Indicates that the container must run as a non-root user.
If true, the Kubelet will validate the image at runtime to ensure that it
@@ -3875,6 +4191,7 @@ PodSecurityContext, the value specified in SecurityContext takes precedence.
Defaults to user specified in image metadata if unspecified.
May also be set in PodSecurityContext. If set in both SecurityContext and
PodSecurityContext, the value specified in SecurityContext takes precedence.
+Note that this field cannot be set when spec.os.name is windows.
+optional | |
| seLinuxOptions | [SELinuxOptions](#s-e-linux-options)| `SELinuxOptions` | | | | |
| seccompProfile | [SeccompProfile](#seccomp-profile)| `SeccompProfile` | | | | |
@@ -3938,19 +4255,19 @@ otherwise).
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| audience | string| `string` | | | Audience is the intended audience of the token. A recipient of a token
+| audience | string| `string` | | | audience is the intended audience of the token. A recipient of a token
must identify itself with an identifier specified in the audience of the
token, and otherwise should reject the token. The audience defaults to the
identifier of the apiserver.
+optional | |
-| expirationSeconds | int64 (formatted integer)| `int64` | | | ExpirationSeconds is the requested duration of validity of the service
+| expirationSeconds | int64 (formatted integer)| `int64` | | | expirationSeconds is the requested duration of validity of the service
account token. As the token approaches expiration, the kubelet volume
plugin will proactively rotate the service account token. The kubelet will
start trying to rotate the token if the token is older than 80 percent of
its time to live or if the token is older than 24 hours. Defaults to 1 hour
and must be at least 10 minutes.
+optional | |
-| path | string| `string` | | | Path is the path relative to the mount point of the file to project the
+| path | string| `string` | | | path is the path relative to the mount point of the file to project the
token into. | |
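As a hedged illustration of how these properties are typically used together (the volume name, audience and mount path below are placeholders, not values from this document), a projected service account token volume might look like:

```yaml
  volumes:
    - name: api-token                  # placeholder volume name
      projected:
        sources:
          - serviceAccountToken:
              audience: api            # intended audience of the token
              expirationSeconds: 3600  # requested validity; the kubelet rotates the token before expiry
              path: token              # file name, relative to the mount point
```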
@@ -3977,17 +4294,17 @@ token into. | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type to mount.
+| fsType | string| `string` | | | fsType is the filesystem type to mount.
Must be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+optional | |
-| readOnly | boolean| `bool` | | | Defaults to false (read/write). ReadOnly here will force
+| readOnly | boolean| `bool` | | | readOnly defaults to false (read/write). ReadOnly here will force
the ReadOnly setting in VolumeMounts.
+optional | |
| secretRef | [LocalObjectReference](#local-object-reference)| `LocalObjectReference` | | | | |
-| volumeName | string| `string` | | | VolumeName is the human-readable name of the StorageOS volume. Volume
+| volumeName | string| `string` | | | volumeName is the human-readable name of the StorageOS volume. Volume
names are only unique within a namespace. | |
-| volumeNamespace | string| `string` | | | VolumeNamespace specifies the scope of the volume within StorageOS. If no
+| volumeNamespace | string| `string` | | | volumeNamespace specifies the scope of the volume within StorageOS. If no
namespace is specified then the Pod's namespace will be used. This allows the
Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
Set VolumeName to any name to override the default behaviour.
@@ -4083,11 +4400,14 @@ Namespaces that do not pre-exist within StorageOS will be created.
### TaintEffect
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| TaintEffect | string| string | | | |
+| TaintEffect | string| string | | +enum | |
@@ -4155,7 +4475,7 @@ run on the selected node(s). Overrides the selector set at the workflow level. |
| parallelism | int64 (formatted integer)| `int64` | | | Parallelism limits the max total parallel pods that can execute at the same time within the
boundaries of this template invocation. If additional steps/dag templates are invoked, the
pods created by those templates will not be counted towards this total. | |
-| plugin | [Object](#object)| `Object` | | | | |
+| plugin | [Plugin](#plugin)| `Plugin` | | | | |
| podSpecPatch | string| `string` | | | PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of
container fields which are not strings (e.g. resource limits). | |
| priority | int32 (formatted integer)| `int32` | | | Priority to apply to workflow pods. | |
@@ -4176,7 +4496,7 @@ Sidecars are automatically killed when the main container completes
| steps | [][ParallelSteps](#parallel-steps)| `[]ParallelSteps` | | | Steps define a series of sequential/parallel workflow steps | |
| suspend | [SuspendTemplate](#suspend-template)| `SuspendTemplate` | | | | |
| synchronization | [Synchronization](#synchronization)| `Synchronization` | | | | |
-| timeout | string| `string` | | | Timout allows to set the total node execution timeout duration counting from the node's start time.
+| timeout | string| `string` | | | Timeout allows setting the total node execution timeout duration counting from the node's start time.
This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates. | |
| tolerations | [][Toleration](#toleration)| `[]*Toleration` | | | Tolerations to apply to workflow pods.
+patchStrategy=merge
@@ -4207,11 +4527,14 @@ This duration also includes time in which the node spends in Pending state. This
### TerminationMessagePolicy
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| TerminationMessagePolicy | string| string | | | |
+| TerminationMessagePolicy | string| string | | +enum | |
@@ -4261,11 +4584,14 @@ If the operator is Exists, the value should be empty, otherwise just a regular s
### TolerationOperator
+> +enum
+
+
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| TolerationOperator | string| string | | | |
+| TolerationOperator | string| string | | +enum | |
@@ -4307,6 +4633,7 @@ If the operator is Exists, the value should be empty, otherwise just a regular s
> TypedLocalObjectReference contains enough information to let you locate the
typed referenced object inside the same namespace.
++structType=atomic
@@ -4348,13 +4675,15 @@ intent and helps make sure that UIDs and names do not get conflated. | |
> URIScheme identifies the scheme used for connection to a host for Get actions
++enum
| Name | Type | Go type | Default | Description | Example |
|------|------|---------| ------- |-------------|---------|
-| URIScheme | string| string | | URIScheme identifies the scheme used for connection to a host for Get actions | |
+| URIScheme | string| string | | URIScheme identifies the scheme used for connection to a host for Get actions
++enum | |
@@ -4370,21 +4699,21 @@ intent and helps make sure that UIDs and names do not get conflated. | |
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
| args | []string| `[]string` | | | Arguments to the entrypoint.
-The docker image's CMD is used if this is not provided.
+The container image's CMD is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
-can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
-regardless of whether the variable exists or not.
-Cannot be updated.
+cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+optional | |
| command | []string| `[]string` | | | Entrypoint array. Not executed within a shell.
-The docker image's ENTRYPOINT is used if this is not provided.
+The container image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
-can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
-regardless of whether the variable exists or not.
-Cannot be updated.
+cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+optional | |
| env | [][EnvVar](#env-var)| `[]*EnvVar` | | | List of environment variables to set in the container.
@@ -4399,7 +4728,7 @@ sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
+optional | |
-| image | string| `string` | | | Docker image name.
+| image | string| `string` | | | Container image name.
More info: https://kubernetes.io/docs/concepts/containers/images
This field is optional to allow higher level config management to default or override
container images in workload controllers like Deployments and StatefulSets.
@@ -4528,7 +4857,7 @@ Cannot be updated.
| glusterfs | [GlusterfsVolumeSource](#glusterfs-volume-source)| `GlusterfsVolumeSource` | | | | |
| hostPath | [HostPathVolumeSource](#host-path-volume-source)| `HostPathVolumeSource` | | | | |
| iscsi | [ISCSIVolumeSource](#i-s-c-s-i-volume-source)| `ISCSIVolumeSource` | | | | |
-| name | string| `string` | | | Volume's name.
+| name | string| `string` | | | name of the volume.
Must be a DNS_LABEL and unique within the pod.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | |
| nfs | [NFSVolumeSource](#n-f-s-volume-source)| `NFSVolumeSource` | | | | |
@@ -4622,15 +4951,15 @@ SubPathExpr and SubPath are mutually exclusive.
| Name | Type | Go type | Required | Default | Description | Example |
|------|------|---------|:--------:| ------- |-------------|---------|
-| fsType | string| `string` | | | Filesystem type to mount.
+| fsType | string| `string` | | | fsType is filesystem type to mount.
Must be a filesystem type supported by the host operating system.
Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+optional | |
-| storagePolicyID | string| `string` | | | Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+| storagePolicyID | string| `string` | | | storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+optional | |
-| storagePolicyName | string| `string` | | | Storage Policy Based Management (SPBM) profile name.
+| storagePolicyName | string| `string` | | | storagePolicyName is the storage Policy Based Management (SPBM) profile name.
+optional | |
-| volumePath | string| `string` | | | Path that identifies vSphere volume vmdk | |
+| volumePath | string| `string` | | | volumePath is the path that identifies vSphere volume vmdk | |
@@ -4671,6 +5000,14 @@ GMSA credential spec named by the GMSACredentialSpecName field.
+optional | |
| gmsaCredentialSpecName | string| `string` | | | GMSACredentialSpecName is the name of the GMSA credential spec to use.
+optional | |
+| hostProcess | boolean| `bool` | | | HostProcess determines if a container should be run as a 'Host Process' container.
+This field is alpha-level and will only be honored by components that enable the
+WindowsHostProcessContainers feature flag. Setting this field without the feature
+flag will result in errors when validating the Pod. All of a Pod's containers must
+have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
+containers and non-HostProcess containers). In addition, if HostProcess is true
+then HostNetwork must also be set to true.
++optional | |
| runAsUserName | string| `string` | | | The UserName in Windows to run the entrypoint of the container process.
Defaults to the user specified in image metadata if unspecified.
May also be set in PodSecurityContext. If set in both SecurityContext and
diff --git a/docs/faq.md b/docs/faq.md
index 2b09d5f42e1b..c4f09490bee6 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -1,16 +1,16 @@
# FAQ
-> "token not valid for running mode", "any bearer token is able to login in the UI or use the API"
+## "token not valid for running mode", "any bearer token is able to login in the UI or use the API"
You've not configured Argo Server authentication correctly. If you want SSO, try running with `--auth-mode=sso`.
[Learn more about the Argo Server set-up](argo-server.md)
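As a rough sketch of what that set-up involves (the issuer, Secret names and redirect URL below are placeholders; the linked page is authoritative), SSO is configured in the `sso` section of the `workflow-controller-configmap` and the server is then started with `--auth-mode=sso`:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: workflow-controller-configmap
data:
  sso: |
    issuer: https://accounts.google.com     # your OIDC provider (placeholder)
    clientId:
      name: argo-server-sso                 # Secret holding the OAuth2 client ID (placeholder)
      key: client-id
    clientSecret:
      name: argo-server-sso                 # Secret holding the OAuth2 client secret (placeholder)
      key: client-secret
    redirectUrl: https://argo.example.com/oauth2/callback   # placeholder
```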
-> Argo Server return EOF error
+## Argo Server returns EOF error
Since v3.0 the Argo Server listens for HTTPS requests, rather than HTTP. Try changing your URL to HTTPS, or start Argo Server using `--secure=false`.
-> My workflow hangs
+## My workflow hangs
Check your `wait` container logs:
@@ -18,13 +18,13 @@ Is there an RBAC error?
[Learn more about workflow RBAC](workflow-rbac.md)
-> Return "unknown (get pods)" error
+## Return "unknown (get pods)" error
You're probably getting a permission denied error because your RBAC is not configured.
[Learn more about workflow RBAC](workflow-rbac.md) and [even more details](https://blog.argoproj.io/demystifying-argo-workflowss-kubernetes-rbac-7a1406d446fc)
-> There is an error about /var/run/docker.sock.
+## There is an error about `/var/run/docker.sock`
Try using a different container runtime executor.
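As a rough sketch (assuming your controller version still exposes the `containerRuntimeExecutor` setting), the executor is selected in the `workflow-controller-configmap`; the `emissary` executor, for example, does not mount `/var/run/docker.sock`:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: workflow-controller-configmap
data:
  containerRuntimeExecutor: emissary   # assumed setting; avoids /var/run/docker.sock
```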
diff --git a/docs/fields.md b/docs/fields.md
index 80a111d33ceb..3abd9071afba 100644
--- a/docs/fields.md
+++ b/docs/fields.md
@@ -18,6 +18,8 @@ Workflow is the definition of a workflow resource
- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-disable-archive.yaml)
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing.yaml)
@@ -168,6 +170,8 @@ Workflow is the definition of a workflow resource
- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/init-container.yaml)
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-gcs.yaml)
- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-git.yaml)
@@ -224,6 +228,8 @@ Workflow is the definition of a workflow resource
- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/node-selector.yaml)
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-gcs.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -330,6 +336,8 @@ Workflow is the definition of a workflow resource
- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/volumes-pvc.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
- [`workflow-of-workflows.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-of-workflows.yaml)
@@ -390,8 +398,12 @@ WorkflowTemplate is the definition of a workflow template resource
Examples (click to open)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`cron-backfill.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cron-backfill.yaml)
- [`dag-inline-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/dag-inline-workflowtemplate.yaml)
@@ -427,6 +439,8 @@ WorkflowSpec is the specification of a Workflow.
- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-disable-archive.yaml)
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing.yaml)
@@ -437,10 +451,14 @@ WorkflowSpec is the specification of a Workflow.
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml)
@@ -587,6 +605,8 @@ WorkflowSpec is the specification of a Workflow.
- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/init-container.yaml)
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-gcs.yaml)
- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-git.yaml)
@@ -633,6 +653,8 @@ WorkflowSpec is the specification of a Workflow.
- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/node-selector.yaml)
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-gcs.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -735,6 +757,8 @@ WorkflowSpec is the specification of a Workflow.
- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/volumes-pvc.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml)
@@ -765,6 +789,7 @@ WorkflowSpec is the specification of a Workflow.
|`affinity`|[`Affinity`](#affinity)|Affinity sets the scheduling constraints for all pods in the io.argoproj.workflow.v1alpha1. Can be overridden by an affinity specified in the template|
|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived|
|`arguments`|[`Arguments`](#arguments)|Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{io.argoproj.workflow.v1alpha1.parameters.myparam}}|
+|`artifactGC`|[`ArtifactGC`](#artifactgc)|ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)|
|`artifactRepositoryRef`|[`ArtifactRepositoryRef`](#artifactrepositoryref)|ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.|
|`automountServiceAccountToken`|`boolean`|AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.|
|`dnsConfig`|[`PodDNSConfig`](#poddnsconfig)|PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.|
@@ -780,9 +805,9 @@ WorkflowSpec is the specification of a Workflow.
|`onExit`|`string`|OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary io.argoproj.workflow.v1alpha1.|
|`parallelism`|`integer`|Parallelism limits the max total parallel pods that can execute at the same time in a workflow|
|`podDisruptionBudget`|[`PodDisruptionBudgetSpec`](#poddisruptionbudgetspec)|PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. Controller will automatically add the selector with workflow name, if selector is empty. Optional: Defaults to empty.|
-|`podGC`|[`PodGC`](#podgc)|PodGC describes the strategy to use when to deleting completed pods|
+|`podGC`|[`PodGC`](#podgc)|PodGC describes the strategy to use when deleting completed pods|
|`podMetadata`|[`Metadata`](#metadata)|PodMetadata defines additional metadata that should be applied to workflow pods|
-|`podPriority`|`integer`|Priority to apply to workflow pods.|
+|~`podPriority`~|~`integer`~|~Priority to apply to workflow pods.~ DEPRECATED: Use PodPriorityClassName instead.|
|`podPriorityClassName`|`string`|PriorityClassName to apply to workflow pods.|
|`podSpecPatch`|`string`|PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).|
|`priority`|`integer`|Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.|
@@ -797,10 +822,10 @@ WorkflowSpec is the specification of a Workflow.
|`templates`|`Array<`[`Template`](#template)`>`|Templates is a list of workflow templates used in a workflow|
|`tolerations`|`Array<`[`Toleration`](#toleration)`>`|Tolerations to apply to workflow pods.|
|`ttlStrategy`|[`TTLStrategy`](#ttlstrategy)|TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be deleted after the time to live expires. If this field is unset, the controller config map will hold the default values.|
-|`volumeClaimGC`|[`VolumeClaimGC`](#volumeclaimgc)|VolumeClaimGC describes the strategy to use when to deleting volumes from completed workflows|
+|`volumeClaimGC`|[`VolumeClaimGC`](#volumeclaimgc)|VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows|
|`volumeClaimTemplates`|`Array<`[`PersistentVolumeClaim`](#persistentvolumeclaim)`>`|VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow|
|`volumes`|`Array<`[`Volume`](#volume)`>`|Volumes is a list of volumes that can be mounted by containers in a io.argoproj.workflow.v1alpha1.|
-|`workflowMetadata`|[`WorkflowMetadata`](#workflowmetadata)|WorkflowMetadata contains some metadata of the workflow to be refer|
+|`workflowMetadata`|[`WorkflowMetadata`](#workflowmetadata)|WorkflowMetadata contains some metadata of the workflow to refer to|
|`workflowTemplateRef`|[`WorkflowTemplateRef`](#workflowtemplateref)|WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution|
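To make the garbage-collection fields above concrete, here is a minimal, illustrative spec fragment (the values are assumptions, not defaults) combining `podGC` and `ttlStrategy`:

```yaml
spec:
  entrypoint: main
  podGC:
    strategy: OnPodCompletion       # delete each pod as soon as it completes
  ttlStrategy:
    secondsAfterCompletion: 300     # delete the finished Workflow object after 5 minutes
```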
## WorkflowStatus
@@ -810,6 +835,7 @@ WorkflowStatus contains overall status information about a workflow
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
+|`artifactGCStatus`|[`ArtGCStatus`](#artgcstatus)|ArtifactGCStatus maintains the status of Artifact Garbage Collection|
|`artifactRepositoryRef`|[`ArtifactRepositoryRefStatus`](#artifactrepositoryrefstatus)|ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it every time we reconcile.|
|`compressedNodes`|`string`|Compressed and base64 decoded Nodes map|
|`conditions`|`Array<`[`Condition`](#condition)`>`|Conditions is a list of conditions the Workflow may have|
@@ -846,6 +872,8 @@ CronWorkflowSpec is the specification of a CronWorkflow
- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-disable-archive.yaml)
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing.yaml)
@@ -856,10 +884,14 @@ CronWorkflowSpec is the specification of a CronWorkflow
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml)
@@ -1006,6 +1038,8 @@ CronWorkflowSpec is the specification of a CronWorkflow
- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/init-container.yaml)
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-gcs.yaml)
- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-git.yaml)
@@ -1052,6 +1086,8 @@ CronWorkflowSpec is the specification of a CronWorkflow
- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/node-selector.yaml)
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-gcs.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -1154,6 +1190,8 @@ CronWorkflowSpec is the specification of a CronWorkflow
- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/volumes-pvc.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml)
@@ -1227,6 +1265,8 @@ Arguments to a template
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml)
@@ -1390,6 +1430,24 @@ Arguments to a template
|`artifacts`|`Array<`[`Artifact`](#artifact)`>`|Artifacts is the list of artifacts to pass to the template or workflow|
|`parameters`|`Array<`[`Parameter`](#parameter)`>`|Parameters is the list of parameters to pass to the template or workflow|
+## ArtifactGC
+
+ArtifactGC describes how to delete artifacts from completed Workflows
+
+
+Examples with this field (click to open)
+
+
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`podMetadata`|[`Metadata`](#metadata)|PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion|
+|`serviceAccountName`|`string`|ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion|
+|`strategy`|`string`|Strategy is the strategy to use.|
+
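As a hedged illustration (modeled loosely on the `artifact-gc-workflow.yaml` example linked above; the strategy value and Service Account name are assumptions), a workflow-level `artifactGC` block might look like:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: artifact-gc-
spec:
  entrypoint: main
  artifactGC:
    strategy: OnWorkflowDeletion        # assumed strategy value
    serviceAccountName: artifact-gc-sa  # optional; Service Account for the deletion Pod
  templates:
    - name: main
      container:
        image: argoproj/argosay:v2
        command: [sh, -c]
        args: ["echo hello > /tmp/message"]
      outputs:
        artifacts:
          - name: message
            path: /tmp/message
```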
## ArtifactRepositoryRef
_No description available_
@@ -1547,7 +1605,7 @@ RetryStrategy provides controls on how to retry a workflow step
|`affinity`|[`RetryAffinity`](#retryaffinity)|Affinity prevents running workflow's step on the same host|
|`backoff`|[`Backoff`](#backoff)|Backoff is a backoff strategy|
|`expression`|`string`|Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored|
-|`limit`|[`IntOrString`](#intorstring)|Limit is the maximum number of attempts when retrying a container|
+|`limit`|[`IntOrString`](#intorstring)|Limit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be `limit + 1`.|
|`retryPolicy`|`string`|RetryPolicy is a policy of NodePhase statuses that will be retried|
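For example (a minimal, illustrative sketch; the image and values are not from this document), a template with `limit: 3` is attempted at most 4 times in total:

```yaml
  templates:
    - name: flaky-step
      retryStrategy:
        limit: 3                 # 3 retries, i.e. at most 4 total attempts
        retryPolicy: OnFailure   # retry only when the container fails
      container:
        image: python:alpine3.6
        command: [python, -c]
        args: ["import random, sys; sys.exit(random.choice([0, 1]))"]
```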
## Synchronization
@@ -1697,6 +1755,17 @@ WorkflowTemplateRef is a reference to a WorkflowTemplate resource.
|`clusterScope`|`boolean`|ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).|
|`name`|`string`|Name is the resource name of the workflow template.|
+## ArtGCStatus
+
+ArtGCStatus maintains state related to ArtifactGC
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`notSpecified`|`boolean`|if this is true, we already checked to see if we need to do it and we don't|
+|`podsRecouped`|`Map< boolean , string >`|have completed Pods been processed? (mapped by Pod name) used to prevent re-processing the Status of a Pod more than once|
+|`strategiesProcessed`|`Map< boolean , string >`|have Pods been started to perform this strategy? (enables us not to re-process what we've already done)|
+
## ArtifactRepositoryRefStatus
_No description available_
@@ -1770,6 +1839,8 @@ Outputs hold parameters, artifacts, and results from a step
- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-disable-archive.yaml)
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing.yaml)
@@ -1780,8 +1851,12 @@ Outputs hold parameters, artifacts, and results from a step
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/conditional-artifacts.yaml)
- [`conditional-parameters.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/conditional-parameters.yaml)
@@ -1822,6 +1897,8 @@ Outputs hold parameters, artifacts, and results from a step
- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/nested-workflow.yaml)
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-gcs.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -1836,6 +1913,8 @@ Outputs hold parameters, artifacts, and results from a step
- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/suspend-template-outputs.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
@@ -1878,6 +1957,8 @@ Artifact indicates an artifact to place at a specified path
- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-disable-archive.yaml)
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing.yaml)
@@ -1888,8 +1969,12 @@ Artifact indicates an artifact to place at a specified path
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`conditional-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/conditional-artifacts.yaml)
@@ -1912,6 +1997,8 @@ Artifact indicates an artifact to place at a specified path
- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/influxdb-ci.yaml)
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-gcs.yaml)
- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-git.yaml)
@@ -1930,10 +2017,14 @@ Artifact indicates an artifact to place at a specified path
- [`nested-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/nested-workflow.yaml)
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-gcs.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
@@ -1942,7 +2033,10 @@ Artifact indicates an artifact to place at a specified path
|:----------:|:----------:|---------------|
|`archive`|[`ArchiveStrategy`](#archivestrategy)|Archive controls how the artifact will be saved to the artifact repository.|
|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived|
+|`artifactGC`|[`ArtifactGC`](#artifactgc)|ArtifactGC describes the strategy to use when deleting an artifact from completed or deleted workflows|
|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details|
+|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details|
+|`deleted`|`boolean`|Has this been deleted?|
|`from`|`string`|From allows an artifact to reference an artifact from a previous step|
|`fromExpression`|`string`|FromExpression, if defined, is evaluated to specify the value for the artifact|
|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details|
@@ -1978,6 +2072,8 @@ Parameter indicate a passed string parameter to a service template with an optio
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml)
@@ -2278,6 +2374,7 @@ ArtifactLocation describes a location for a single or multiple artifacts. It is
|:----------:|:----------:|---------------|
|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived|
|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details|
+|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details|
|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details|
|`git`|[`GitArtifact`](#gitartifact)|Git contains git artifact location details|
|`hdfs`|[`HDFSArtifact`](#hdfsartifact)|HDFS contains HDFS artifact location details|
@@ -2294,6 +2391,8 @@ _No description available_
Examples with this field (click to open)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/container-set-template/graph-workflow.yaml)
- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/container-set-template/outputs-result-workflow.yaml)
@@ -2322,6 +2421,8 @@ DAGTemplate is a template subtype for directed acyclic graph templates
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml)
- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/clustertemplates.yaml)
@@ -2425,6 +2526,8 @@ _No description available_
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/daemon-nginx.yaml)
- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/daemon-step.yaml)
@@ -2448,14 +2551,17 @@ _No description available_
- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/sidecar-nginx.yaml)
- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/sidecar.yaml)
+
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
|`body`|`string`|Body is content of the HTTP Request|
+|`bodyFrom`|[`HTTPBodySource`](#httpbodysource)|BodyFrom is content of the HTTP Request as Bytes|
|`headers`|`Array<`[`HTTPHeader`](#httpheader)`>`|Headers are an optional list of headers to send with HTTP requests|
-|`insecureSkipVerify`|`boolean`|insecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client|
+|`insecureSkipVerify`|`boolean`|InsecureSkipVerify is a bool which, if set to true, will skip TLS verification for the HTTP client|
|`method`|`string`|Method is HTTP methods for HTTP Request|
|`successCondition`|`string`|SuccessCondition is an expression if evaluated to true is considered successful|
|`timeoutSeconds`|`integer`|TimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds|
@@ -2475,11 +2581,11 @@ UserContainer is a container specified by a user.
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
-|`args`|`Array< string >`|Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
-|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
+|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
+|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.|
|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.|
-|`image`|`string`|Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.|
+|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.|
|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images|
|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.|
|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|
@@ -2523,10 +2629,14 @@ Inputs are the mechanism for passing parameters, artifacts, volumes from one tem
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/clustertemplates.yaml)
@@ -2591,6 +2701,8 @@ Inputs are the mechanism for passing parameters, artifacts, volumes from one tem
- [`influxdb-ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/influxdb-ci.yaml)
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-gcs.yaml)
- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-git.yaml)
@@ -2661,6 +2773,8 @@ Inputs are the mechanism for passing parameters, artifacts, volumes from one tem
- [`suspend-template-outputs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/suspend-template-outputs.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml)
@@ -2726,6 +2840,7 @@ ResourceTemplate is a template subtype to manipulate kubernetes resources
|`failureCondition`|`string`|FailureCondition is a label selector expression which describes the conditions of the k8s resource in which the step was considered failed|
|`flags`|`Array< string >`|Flags is a set of additional options passed to kubectl before submitting a resource I.e. to disable resource validation: flags: [ "--validate=false" # disable resource validation]|
|`manifest`|`string`|Manifest contains the kubernetes manifest|
+|`manifestFrom`|[`ManifestFrom`](#manifestfrom)|ManifestFrom is the source for a single kubernetes manifest|
|`mergeStrategy`|`string`|MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" Must be one of: strategic, merge, json|
|`setOwnerReference`|`boolean`|SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.|
|`successCondition`|`string`|SuccessCondition is a label selector expression which describes the conditions of the k8s resource in which it is acceptable to proceed to the following step|
@@ -2798,11 +2913,11 @@ ScriptTemplate is a template subtype to enable scripting through code steps
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
-|`args`|`Array< string >`|Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
-|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
+|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
+|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.|
|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.|
-|`image`|`string`|Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.|
+|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.|
|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images|
|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.|
|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|
@@ -3056,6 +3171,7 @@ ArtifactRepository represents an artifact repository in which a controller will
|:----------:|:----------:|---------------|
|`archiveLogs`|`boolean`|ArchiveLogs enables log archiving|
|`artifactory`|[`ArtifactoryArtifactRepository`](#artifactoryartifactrepository)|Artifactory stores artifacts to JFrog Artifactory|
+|`azure`|[`AzureArtifactRepository`](#azureartifactrepository)|Azure stores artifact in an Azure Storage account|
|`gcs`|[`GCSArtifactRepository`](#gcsartifactrepository)|GCS stores artifact in a GCS object store|
|`hdfs`|[`HDFSArtifactRepository`](#hdfsartifactrepository)|HDFS stores artifacts in HDFS|
|`oss`|[`OSSArtifactRepository`](#ossartifactrepository)|OSS stores artifact in a OSS-compliant object store|
@@ -3122,6 +3238,10 @@ ArchiveStrategy describes how to archive files/directory when saving artifacts
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/map-reduce.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -3152,6 +3272,28 @@ ArtifactoryArtifact is the location of an artifactory artifact
|`url`|`string`|URL of the artifact|
|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username|
+## AzureArtifact
+
+AzureArtifact is the location of an Azure Storage artifact
+
+
+Examples with this field (click to open)
+
+
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`accountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccountKeySecret is the secret selector to the Azure Blob Storage account access key|
+|`blob`|`string`|Blob is the blob name (i.e., path) in the container where the artifact resides|
+|`container`|`string`|Container is the container where resources will be stored|
+|`endpoint`|`string`|Endpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net"|
+|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.|
+
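+A minimal sketch of an input artifact using these fields (the endpoint, container, blob and secret names are illustrative):
+
+```yaml
+inputs:
+  artifacts:
+    - name: my-input
+      path: /tmp/input.txt
+      azure:
+        endpoint: https://myaccount.blob.core.windows.net
+        container: my-container
+        blob: path/in/container/input.txt
+        accountKeySecret:
+          name: my-azure-credentials
+          key: account-access-key
+        # useSDKCreds: true   # alternatively, rely on the SDK's default credentials
+```
+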
## GCSArtifact
GCSArtifact is the location of a GCS artifact
@@ -3194,6 +3336,7 @@ GitArtifact is the location of an git artifact
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
+|`branch`|`string`|Branch is the branch to fetch when `SingleBranch` is enabled|
|`depth`|`integer`|Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip|
|`disableSubmodules`|`boolean`|DisableSubmodules disables submodules during git clone|
|`fetch`|`Array< string >`|Fetch specifies a number of refs that should be fetched before checkout|
@@ -3201,6 +3344,7 @@ GitArtifact is the location of an git artifact
|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password|
|`repo`|`string`|Repo is the git repository|
|`revision`|`string`|Revision is the git commit, tag, branch to checkout|
+|`singleBranch`|`boolean`|SingleBranch enables single branch clone, using the `branch` parameter|
|`sshPrivateKeySecret`|[`SecretKeySelector`](#secretkeyselector)|SSHPrivateKeySecret is the secret selector to the repository ssh private key|
|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username|
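+
+A minimal sketch combining the new `singleBranch` and `branch` fields (the repository URL and branch name are illustrative):
+
+```yaml
+inputs:
+  artifacts:
+    - name: source
+      path: /src
+      git:
+        repo: https://github.com/argoproj/argo-workflows.git
+        singleBranch: true
+        branch: main      # fetched because singleBranch is enabled
+        depth: 1          # optional: shallow clone of the branch tip
+```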
@@ -3231,7 +3375,7 @@ HDFSArtifact is the location of an HDFS artifact
## HTTPArtifact
-HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container
+HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container
Examples with this field (click to open)
@@ -3241,6 +3385,8 @@ HTTPArtifact allows an file served on HTTP to be placed as an input artifact in
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`daemon-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/daemon-nginx.yaml)
- [`daemon-step.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/daemon-step.yaml)
@@ -3264,11 +3410,14 @@ HTTPArtifact allows an file served on HTTP to be placed as an input artifact in
- [`sidecar-nginx.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/sidecar-nginx.yaml)
- [`sidecar.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/sidecar.yaml)
+
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
+|`auth`|[`HTTPAuth`](#httpauth)|Auth contains information for client authentication|
|`headers`|`Array<`[`Header`](#header)`>`|Headers are an optional list of headers to send with HTTP requests for artifacts|
|`url`|`string`|URL of the artifact|
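+
+A minimal sketch of an HTTP input artifact using these fields (the URL and header values are illustrative; the auth-related types used by the `auth` field are described further below):
+
+```yaml
+inputs:
+  artifacts:
+    - name: my-file
+      path: /tmp/file.txt
+      http:
+        url: https://example.com/artifacts/file.txt
+        headers:
+          - name: Accept
+            value: application/octet-stream
+```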
@@ -3484,8 +3633,6 @@ MetricLabel is a single label for a prometheus metric
- [`dag-inline-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/dag-inline-workflow.yaml)
-- [`data-transformations.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/data-transformations.yaml)
-
- [`exit-handler-with-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/exit-handler-with-artifacts.yaml)
- [`exit-handler-with-param.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/exit-handler-with-param.yaml)
@@ -3535,6 +3682,8 @@ _No description available_
Examples with this field (click to open)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`graph-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/container-set-template/graph-workflow.yaml)
- [`outputs-result-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/container-set-template/outputs-result-workflow.yaml)
@@ -3553,12 +3702,12 @@ _No description available_
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
-|`args`|`Array< string >`|Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
-|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
+|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
+|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell|
|`dependencies`|`Array< string >`|_No description available_|
|`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.|
|`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.|
-|`image`|`string`|Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.|
+|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.|
|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images|
|`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.|
|`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes|
@@ -3624,6 +3773,8 @@ DAGTask represents a node in the graph during DAG execution
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml)
- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/clustertemplates.yaml)
@@ -3793,10 +3944,26 @@ _No description available_
|:----------:|:----------:|---------------|
|`expression`|`string`|Expression defines an expr expression to apply|
+## HTTPBodySource
+
+HTTPBodySource contains the source of the HTTP body.
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`bytes`|`byte`|_No description available_|
+
## HTTPHeader
_No description available_
+
+Examples with this field (click to open)
+
+
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
+
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
@@ -3820,6 +3987,15 @@ Cache is the configuration for the type of cache to be used
|:----------:|:----------:|---------------|
|`configMap`|[`ConfigMapKeySelector`](#configmapkeyselector)|ConfigMap sets a ConfigMap-based cache|
+## ManifestFrom
+
+_No description available_
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`artifact`|[`Artifact`](#artifact)|Artifact contains the artifact to use|
+
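+A minimal sketch of a resource template that takes its manifest from an input artifact (template and artifact names are illustrative, and the exact wiring is an assumption based on the field tables above):
+
+```yaml
+- name: apply-from-artifact
+  inputs:
+    artifacts:
+      - name: manifest
+        path: /tmp/manifest.yaml
+  resource:
+    action: apply
+    manifestFrom:
+      artifact:
+        name: manifest
+```
+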
## ContinueOn
ContinueOn defines if a workflow should continue even if a task or step fails/errors. It can be specified if the workflow should continue when the pod errors, fails or both.
@@ -3923,6 +4099,28 @@ ArtifactoryArtifactRepository defines the controller configuration for an artifa
|`repoURL`|`string`|RepoURL is the url for artifactory repo.|
|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username|
+## AzureArtifactRepository
+
+AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository
+
+
+Examples with this field (click to open)
+
+
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`accountKeySecret`|[`SecretKeySelector`](#secretkeyselector)|AccountKeySecret is the secret selector to the Azure Blob Storage account access key|
+|`blobNameFormat`|`string`|BlobNameFormat defines the format of how to store blob names. Can reference workflow variables|
+|`container`|`string`|Container is the container where resources will be stored|
+|`endpoint`|`string`|Endpoint is the service url associated with an account. It is most likely "https://.blob.core.windows.net"|
+|`useSDKCreds`|`boolean`|UseSDKCreds tells the driver to figure out credentials based on sdk defaults.|
+
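+A minimal sketch of an Azure entry in an artifact repository configuration, for example inside an `artifact-repositories` ConfigMap (the account, container and secret names are illustrative):
+
+```yaml
+azure:
+  endpoint: https://myaccount.blob.core.windows.net
+  container: my-container
+  blobNameFormat: "{{workflow.name}}/{{pod.name}}"
+  accountKeySecret:
+    name: my-azure-credentials
+    key: account-access-key
+```
+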
## GCSArtifactRepository
GCSArtifactRepository defines the controller configuration for a GCS artifact repository
@@ -4043,6 +4241,10 @@ NoneStrategy indicates to skip tar process and upload the files or directory tre
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`map-reduce.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/map-reduce.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -4068,10 +4270,35 @@ TarStrategy will tar and gzip the file or directory when saving
ZipStrategy will unzip zipped input artifacts
+## HTTPAuth
+
+_No description available_
+
+
+Examples with this field (click to open)
+
+
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`basicAuth`|[`BasicAuth`](#basicauth)|_No description available_|
+|`clientCert`|[`ClientCertAuth`](#clientcertauth)|_No description available_|
+|`oauth2`|[`OAuth2Auth`](#oauth2auth)|_No description available_|
+
## Header
Header indicate a key-value request header to be used when fetching artifacts over HTTP
+
+Examples with this field (click to open)
+
+
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
+
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
@@ -4147,7 +4374,10 @@ ArtifactPaths expands a step from a collection of artifacts
|:----------:|:----------:|---------------|
|`archive`|[`ArchiveStrategy`](#archivestrategy)|Archive controls how the artifact will be saved to the artifact repository.|
|`archiveLogs`|`boolean`|ArchiveLogs indicates if the container logs should be archived|
+|`artifactGC`|[`ArtifactGC`](#artifactgc)|ArtifactGC describes the strategy to use when deleting an artifact from completed or deleted workflows|
|`artifactory`|[`ArtifactoryArtifact`](#artifactoryartifact)|Artifactory contains artifactory artifact location details|
+|`azure`|[`AzureArtifact`](#azureartifact)|Azure contains Azure Storage artifact location details|
+|`deleted`|`boolean`|Has this been deleted?|
|`from`|`string`|From allows an artifact to reference an artifact from a previous step|
|`fromExpression`|`string`|FromExpression, if defined, is evaluated to specify the value for the artifact|
|`gcs`|[`GCSArtifact`](#gcsartifact)|GCS contains GCS artifact location details|
@@ -4221,6 +4451,63 @@ _No description available_
|:----------:|:----------:|---------------|
|`secretKeyRef`|[`SecretKeySelector`](#secretkeyselector)|_No description available_|
+## BasicAuth
+
+BasicAuth describes the secret selectors required for basic authentication
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`passwordSecret`|[`SecretKeySelector`](#secretkeyselector)|PasswordSecret is the secret selector to the repository password|
+|`usernameSecret`|[`SecretKeySelector`](#secretkeyselector)|UsernameSecret is the secret selector to the repository username|
+
+## ClientCertAuth
+
+ClientCertAuth holds necessary information for client authentication via certificates
+
+
+Examples with this field (click to open)
+
+
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`clientCertSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_|
+|`clientKeySecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_|
+
+## OAuth2Auth
+
+OAuth2Auth holds all information for client authentication via OAuth2 tokens
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`clientIDSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_|
+|`clientSecretSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_|
+|`endpointParams`|`Array<`[`OAuth2EndpointParam`](#oauth2endpointparam)`>`|_No description available_|
+|`scopes`|`Array< string >`|_No description available_|
+|`tokenURLSecret`|[`SecretKeySelector`](#secretkeyselector)|_No description available_|
+
+## OAuth2EndpointParam
+
+EndpointParam is for requesting optional fields that should be sent in the oauth request
+
+
+Examples with this field (click to open)
+
+
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
+
+### Fields
+| Field Name | Field Type | Description |
+|:----------:|:----------:|---------------|
+|`key`|`string`|Name is the header name|
+|`value`|`string`|Value is the literal value to use for the header|
+
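+A minimal sketch of an HTTP artifact using the auth structures above (basic auth is shown; `clientCert` or `oauth2` could be used instead, and the URL and secret names are illustrative):
+
+```yaml
+http:
+  url: https://webhdfs.example.com/webhdfs/v1/my-file.txt?op=OPEN
+  auth:
+    basicAuth:
+      usernameSecret:
+        name: my-http-credentials
+        key: username
+      passwordSecret:
+        name: my-http-credentials
+        key: password
+```
+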
# External Fields
@@ -4242,6 +4529,8 @@ ObjectMeta is metadata that all persisted resources must have, which includes al
- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-disable-archive.yaml)
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing.yaml)
@@ -4252,10 +4541,14 @@ ObjectMeta is metadata that all persisted resources must have, which includes al
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml)
@@ -4402,6 +4695,8 @@ ObjectMeta is metadata that all persisted resources must have, which includes al
- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/init-container.yaml)
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-gcs.yaml)
- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-git.yaml)
@@ -4448,6 +4743,8 @@ ObjectMeta is metadata that all persisted resources must have, which includes al
- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/node-selector.yaml)
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-gcs.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -4550,6 +4847,8 @@ ObjectMeta is metadata that all persisted resources must have, which includes al
- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/volumes-pvc.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml)
@@ -4716,6 +5015,8 @@ PersistentVolumeClaim is a user's request for and claim to a persistent volume
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`fun-with-gifs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/fun-with-gifs.yaml)
@@ -4742,6 +5043,8 @@ Volume represents a named volume in a pod that may be accessed by any container
Examples with this field (click to open)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/container-set-template/workspace-workflow.yaml)
@@ -4872,6 +5175,8 @@ A single application container that you want to run within a pod.
- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-disable-archive.yaml)
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing.yaml)
@@ -4886,6 +5191,8 @@ A single application container that you want to run within a pod.
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`clustertemplates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/clustertemplates.yaml)
@@ -4992,6 +5299,8 @@ A single application container that you want to run within a pod.
- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/init-container.yaml)
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-gcs.yaml)
- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-git.yaml)
@@ -5032,6 +5341,8 @@ A single application container that you want to run within a pod.
- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/node-selector.yaml)
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-gcs.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -5124,6 +5435,8 @@ A single application container that you want to run within a pod.
- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/volumes-pvc.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml)
@@ -5189,10 +5502,14 @@ VolumeMount describes a mounting of a Volume within a container.
Examples with this field (click to open)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/container-set-template/workspace-workflow.yaml)
@@ -5314,6 +5631,8 @@ ResourceRequirements describes the compute resource requirements.
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`dns-config.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/dns-config.yaml)
@@ -5531,6 +5850,8 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and
- [`artifact-disable-archive.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-disable-archive.yaml)
+- [`artifact-gc-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-gc-workflow.yaml)
+
- [`artifact-passing-subpath.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing-subpath.yaml)
- [`artifact-passing.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifact-passing.yaml)
@@ -5541,10 +5862,14 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and
- [`artifactory-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifactory-artifact.yaml)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`buildkit-template.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/buildkit-template.yaml)
- [`ci-output-artifact.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-output-artifact.yaml)
+- [`ci-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci-workflowtemplate.yaml)
+
- [`ci.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/ci.yaml)
- [`cluster-wftmpl-dag.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/cluster-workflow-template/cluster-wftmpl-dag.yaml)
@@ -5691,6 +6016,8 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and
- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/init-container.yaml)
+- [`input-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-azure.yaml)
+
- [`input-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-gcs.yaml)
- [`input-artifact-git.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/input-artifact-git.yaml)
@@ -5737,6 +6064,8 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and
- [`node-selector.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/node-selector.yaml)
+- [`output-artifact-azure.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-azure.yaml)
+
- [`output-artifact-gcs.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-gcs.yaml)
- [`output-artifact-s3.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/output-artifact-s3.yaml)
@@ -5839,6 +6168,8 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and
- [`volumes-pvc.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/volumes-pvc.yaml)
+- [`webhdfs-input-output-artifacts.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml)
+
- [`work-avoidance.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/work-avoidance.yaml)
- [`event-consumer-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-event-binding/event-consumer-workflowtemplate.yaml)
@@ -6001,6 +6332,8 @@ Represents an empty directory for a pod. Empty directory volumes support ownersh
Examples with this field (click to open)
+- [`artifacts-workflowtemplate.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/artifacts-workflowtemplate.yaml)
+
- [`workspace-workflow.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/container-set-template/workspace-workflow.yaml)
- [`init-container.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/init-container.yaml)
diff --git a/docs/high-availability.md b/docs/high-availability.md
index 90aa29f77e6c..fb2a245f2cf4 100644
--- a/docs/high-availability.md
+++ b/docs/high-availability.md
@@ -2,15 +2,15 @@
## Workflow Controller
-Only one controller can run at once. If it crashes, Kubernetes will start another pod.
+Before v3.0, only one controller could run at once. (If it crashed, Kubernetes would start another pod.)
-> v3.0
+> v3.0
-For many users, a short loss of workflow service maybe acceptable - the new controller will just continue running
+For many users, a short loss of workflow service may be acceptable - the new controller will just continue running
workflows if it restarts. However, with high service guarantees, new pods may take too long to start running workflows.
You should run two replicas, one of which will be kept on hot standby.
-A voluntary pod disruption can cause both replicas to be replaced at the same time. You should use a Pod Disruption
+A voluntary pod disruption can cause both replicas to be replaced at the same time. You should use a Pod Disruption
Budget to prevent this and Pod Priority to recover faster from an involuntary pod disruption:
* [Pod Disruption Budget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#pod-disruption-budgets)
@@ -24,4 +24,3 @@ Run a minimum of two replicas, typically three, should be run, otherwise it may
!!! Tip
Consider using [multi AZ-deployment using pod anti-affinity](https://www.verygoodsecurity.com/blog/posts/kubernetes-multi-az-deployments-using-pod-anti-affinity).
-
diff --git a/docs/http-template.md b/docs/http-template.md
index aa67bfcc5997..771f141d6777 100644
--- a/docs/http-template.md
+++ b/docs/http-template.md
@@ -1,10 +1,8 @@
# HTTP Template
-
-> v3.2 and after
-`HTTP Template` is a type of template which can execute the HTTP Requests.
+> v3.2 and after
-### HTTP Template
+`HTTP Template` is a type of template which can execute HTTP Requests.
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -33,8 +31,8 @@ spec:
value: "test-value"
# Template will succeed if evaluated to true, otherwise will fail
# Available variables:
- # request.body: string, the response body
- # request.headers: map[string][]string, the response headers
+ # request.body: string, the request body
+ # request.headers: map[string][]string, the request headers
# response.url: string, the request url
# response.method: string, the request method
# response.statusCode: int, the response status code
@@ -44,7 +42,10 @@ spec:
body: "test body" # Change request body
```
-### Argo Agent
+## Argo Agent
+
HTTP Templates use the Argo Agent, which executes the requests independently of the controller. The Agent and the Workflow
Controller communicate through the `WorkflowTaskSet` CRD, which is created for each running `Workflow` that requires the use
-of the `Agent`.
\ No newline at end of file
+of the `Agent`.
+
+In order to use the Argo Agent, you will need to ensure that you have added the appropriate [workflow RBAC](workflow-rbac.md) to add an agent role to Argo Workflows. An example agent role can be found in [the quick-start manifests](https://github.com/argoproj/argo-workflows/tree/master/manifests/quick-start/base/agent-role.yaml).
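+
+A sketch of what a corresponding `RoleBinding` might look like (the role name, ServiceAccount and namespace below are assumptions - use the role and binding shipped in the quick-start manifests as the reference):
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: agent
+  namespace: argo
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: agent              # hypothetical: the role defined in agent-role.yaml
+subjects:
+  - kind: ServiceAccount
+    name: default          # the ServiceAccount your workflows run as
+    namespace: argo
+```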
diff --git a/docs/ide-setup.md b/docs/ide-setup.md
index b6dffe9090d3..2e5f4f4472b1 100644
--- a/docs/ide-setup.md
+++ b/docs/ide-setup.md
@@ -12,11 +12,11 @@ Configure your IDE to reference the Argo schema and map it to your Argo YAML fil

-- The schema is located at [https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json](https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json).
+- The schema is located [here](https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json).
- Specify a file glob pattern that locates **your** Argo files. The example glob here is for the Argo Github project!
- Note that you may need to restart IDEA to pick up the changes.
-That's it. Open an Argo YAML file and you should see smarter behaviour, including type errors and context-sensitive autocomplete.
+That's it. Open an Argo YAML file and you should see smarter behavior, including type errors and context-sensitive auto-complete.

@@ -24,7 +24,7 @@ That's it. Open an Argo YAML file and you should see smarter behaviour, includin
If you have the [JetBrains Kubernetes Plugin](https://plugins.jetbrains.com/plugin/10485-kubernetes)
installed in your IDE, the validation can be configured in the Kubernetes plugin settings
-instead of using the internal JSON schema file validator.
+instead of using the internal JSON schema file validator.

@@ -32,18 +32,18 @@ Unlike the previous JSON schema validation method, the plugin detects the necess
based on Kubernetes resource definition keys and does not require a file glob pattern.
Like the previously described method:
-- The schema is located at [https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json](https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json).
+- The schema is located [here](https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json).
- Note that you may need to restart IDEA to pick up the changes.
### VSCode
-The [Red Hat YAML](https://github.com/redhat-developer/vscode-yaml) plugin will provide error highlighting and autocompletion for Argo resources.
+The [Red Hat YAML](https://github.com/redhat-developer/vscode-yaml) plugin will provide error highlighting and auto-completion for Argo resources.
Install the Red Hat YAML plugin in VSCode and open extension settings:

-Open the YAML schemas settings:
+Open the YAML schema settings:

@@ -51,10 +51,10 @@ Add the Argo schema setting `yaml.schemas`:

-- The schema is located at [https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json](https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json).
+- The schema is located [here](https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/jsonschema/schema.json).
- Specify a file glob pattern that locates **your** Argo files. The example glob here is for the Argo Github project!
-- Note that other defined schemas with overlapping glob patterns may cause errors.
+- Note that other defined schemas with overlapping glob patterns may cause errors.
-That's it. Open an Argo YAML file and you should see smarter behaviour, including type errors and context-sensitive autocomplete.
+That's it. Open an Argo YAML file and you should see smarter behavior, including type errors and context-sensitive auto-complete.

diff --git a/docs/templates.md b/docs/inline-templates.md
similarity index 54%
rename from docs/templates.md
rename to docs/inline-templates.md
index c06e075df5ff..03210fc3f802 100644
--- a/docs/templates.md
+++ b/docs/inline-templates.md
@@ -1,12 +1,4 @@
-# Templates
-
-See [core concepts](core-concepts.md) for DAG, steps, container templates.
-
-## Container Set Template
-
-See [container set template](container-set-template.md).
-
-## Inline Templates
+# Inline Templates
> v3.2 and after
@@ -18,4 +10,4 @@ Examples:
* [Steps](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/steps-inline-workflow.yaml)
!!! Warning
- You can only inline once. Inlining a DAG within a DAG will not work.
+ You can only inline once. Inlining a DAG within a DAG will not work.
diff --git a/docs/installation.md b/docs/installation.md
index 99d64581c542..3e591d145082 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -1,30 +1,40 @@
# Installation
-## Argo on Desktop
+## Non-production installation
-Use the [quick-start manifests](quick-start.md).
+If you just want to try out Argo Workflows in a non-production environment (including on desktop via minikube/kind/k3d etc.), follow the [quick-start guide](quick-start.md).
-## Argo in Production
+## Production installation
-Determine your base installation option.
+### Installation Methods
-* A **cluster install** will watch and execute workflows in all namespaces.
-* A **namespace install** only executes workflows in the namespace it is installed in (typically `argo`).
-* A **managed namespace install**: only executes workflows in a specific namespace ([learn more](managed-namespace.md)).
+#### Official release manifests
-⚠️ `latest` is tip, not stable. Never run it. Make sure you're using the manifests attached to each Github release. See [this link](https://github.com/argoproj/argo-workflows/releases/latest) for the most recent manifests.
+To install Argo Workflows, navigate to the [releases page](https://github.com/argoproj/argo-workflows/releases/latest) and find the release you wish to use (the latest full release is preferred). Scroll down to the `Controller and Server` section and execute the `kubectl` commands.
-⚠️ Double-check you have the right version of your executor configured, it's easy to miss.
+You can use Kustomize to patch your preferred [configurations](managed-namespace.md) on top of the base manifest.
-⚠️ If you are using GitOps. Never use Kustomize remote base, this is dangerous. Instead, copy the manifests into your Git repo.
+⚠️ If you are using GitOps, never use Kustomize remote base: this is dangerous. Instead, copy the manifests into your Git repo.
-Review the following:
+⚠️ `latest` is tip, not stable. Never run it in production.
+
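+For example, a minimal `kustomization.yaml` might look like this (a sketch only; the file names are illustrative and it assumes you have copied the release's `install.yaml` and your patch into your own repository):
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: argo
+resources:
+  - install.yaml                               # copied from the release assets, not a remote base
+patchesStrategicMerge:
+  - workflow-controller-configmap-patch.yaml   # your local configuration changes
+```
+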
+#### Argo Workflows Helm Chart
+
+You can install Argo Workflows using the community maintained [Helm charts](https://github.com/argoproj/argo-helm).
- * [Security](security.md).
- * [Scaling](scaling.md) and [running at massive scale](running-at-massive-scale.md).
- * [High-availability](high-availability.md)
- * [Disaster recovery](disaster-recovery.md)
+## Installation options
-Read the [upgrading guide](upgrading.md) before any major upgrade to be aware of breaking changes.
+Determine your base installation option.
+
+* A **cluster install** will watch and execute workflows in all namespaces. This is the default installation option when installing using the official release manifests.
+* A **namespace install** only executes workflows in the namespace it is installed in (typically `argo`). Look for `namespace-install.yaml` in the [release assets](https://github.com/argoproj/argo-workflows/releases/latest).
+* A **managed namespace install**: only executes workflows in a specific namespace ([learn more](managed-namespace.md)).
+## Additional installation considerations
+
+Review the following:
+* [Security](security.md).
+* [Scaling](scaling.md) and [running at massive scale](running-at-massive-scale.md).
+* [High-availability](high-availability.md)
+* [Disaster recovery](disaster-recovery.md)
diff --git a/docs/intermediate-inputs.md b/docs/intermediate-inputs.md
new file mode 100644
index 000000000000..c3bf889d1c87
--- /dev/null
+++ b/docs/intermediate-inputs.md
@@ -0,0 +1,138 @@
+# Intermediate Parameters
+
+> v3.4 and after
+
+Traditionally, Argo Workflows has supported input parameters from the UI only when the workflow starts;
+after that, it is pretty much on autopilot. However, there are many use cases where human interaction is required.
+
+This interaction takes the form of providing input text in the middle of the workflow, or choosing from a dropdown of options which a workflow step itself generates.
+
+A similar feature in Jenkins is [pipeline-input-step](https://www.jenkins.io/doc/pipeline/steps/pipeline-input-step/).
+
+Example use cases include:
+
+- A human approval before doing something in a production environment.
+- Programmatic generation of a list of inputs from which the user chooses, for example choosing from a list of available databases which the workflow itself generates.
+
+This feature is achieved via a `suspend` template.
+
+The workflow will pause at a `Suspend` node, and the user will be able to update parameters using text or dropdown fields.
+
+## Intermediate Parameters Approval Example
+
+- The below example shows an `approval` step with static enum values.
+- The user will be able to choose between `[YES, NO]` which will be used in subsequent steps.
+
+[Demo video](https://youtu.be/eyeZ2oddwWE)
+
+```yaml
+
+entrypoint: cicd-pipeline
+templates:
+ - name: cicd-pipeline
+ steps:
+ - - name: deploy-pre-prod
+ template: deploy
+ - - name: approval
+ template: approval
+ - - name: deploy-prod
+ template: deploy
+ when: '{{steps.approval.outputs.parameters.approve}} == YES'
+ - name: approval
+ suspend: {}
+ inputs:
+ parameters:
+ - name: approve
+ default: 'NO'
+ enum:
+ - 'YES'
+ - 'NO'
+ outputs:
+ parameters:
+ - name: approve
+ valueFrom:
+ supplied: {}
+ - name: deploy
+ container:
+ image: 'argoproj/argosay:v2'
+ command:
+ - /argosay
+ args:
+ - echo
+ - deploying
+```
+
+## Intermediate Parameters DB Schema Update Example
+
+- The below example shows programmatic generation of `enum` values.
+- The `generate-db-list` template generates an output called `db_list`.
+- This output is of type `json`.
+- Since this `json` has a `key` called `enum`, with an array of options, the UI will parse this and display it as a dropdown.
+- The output can also be any string, in which case the UI will display it as a text field, which the user can later edit.
+
+[Demo video](https://youtu.be/QgE-1782YJc)
+
+```yaml
+entrypoint: db-schema-update
+templates:
+ - name: db-schema-update
+ steps:
+ - - name: generate-db-list
+ template: generate-db-list
+ - - name: choose-db
+ template: choose-db
+ arguments:
+ parameters:
+ - name: db_name
+ value: '{{steps.generate-db-list.outputs.parameters.db_list}}'
+ - - name: update-schema
+ template: update-schema
+ arguments:
+ parameters:
+ - name: db_name
+ value: '{{steps.choose-db.outputs.parameters.db_name}}'
+ - name: generate-db-list
+ outputs:
+ parameters:
+ - name: db_list
+ valueFrom:
+ path: /tmp/db_list.txt
+ container:
+ name: main
+ image: 'argoproj/argosay:v2'
+ command:
+ - sh
+ - '-c'
+ args:
+ - >-
+ echo "{\"enum\": [\"db1\", \"db2\", \"db3\"]}" | tee /tmp/db_list.txt
+ - name: choose-db
+ inputs:
+ parameters:
+ - name: db_name
+ outputs:
+ parameters:
+ - name: db_name
+ valueFrom:
+ supplied: {}
+ suspend: {}
+ - name: update-schema
+ inputs:
+ parameters:
+ - name: db_name
+ container:
+ name: main
+ image: 'argoproj/argosay:v2'
+ command:
+ - sh
+ - '-c'
+ args:
+ - echo Updating DB {{inputs.parameters.db_name}}
+```
+
+### Some Important Details
+
+- The suspended node must define the **same** parameters in `inputs.parameters` and `outputs.parameters`.
+- All output parameters in the suspended node must have `valueFrom.supplied: {}`.
+- The selected values will be available on the suspended step's `outputs.parameters`, e.g. `{{steps.approval.outputs.parameters.approve}}` in the first example above.
diff --git a/docs/key-only-artifacts.md b/docs/key-only-artifacts.md
index 7a3519075eb3..2ace452d120f 100644
--- a/docs/key-only-artifacts.md
+++ b/docs/key-only-artifacts.md
@@ -1,18 +1,17 @@
# Key-Only Artifacts
-
> v3.0 and after
A key-only artifact is an input or output artifact where you only specify the key, omitting the bucket, secrets etc. When these are omitted, the bucket/secrets from the configured artifact repository are used.
This allows you to move the configuration of the artifact repository out of the workflow specification.
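+
+For example, a key-only output artifact might be declared as in this minimal sketch (the artifact name, path and key are placeholders):
+
+```yaml
+outputs:
+  artifacts:
+    - name: my-artifact
+      path: /tmp/hello.txt
+      s3:
+        # only the key is given; the bucket, endpoint and credentials
+        # come from the configured artifact repository
+        key: my-artifact
+```
+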
-This is closely related to [artifact repository ref](artifact-repository-ref.md). You'll want to use them together for maximum benefit.
+This is closely related to [artifact repository ref](artifact-repository-ref.md). You'll want to use them together for maximum benefit.
This should probably be your default if you're using v3.0:
* Reduces the size of workflows (improved performance).
-* User owned artifact repository set-up configuration (simplified management).
+* User owned artifact repository set-up configuration (simplified management).
* Decouples the artifact location configuration from the workflow, allowing you to re-configure the artifact repository without changing your workflows or templates.
Example:
@@ -57,4 +56,4 @@ spec:
```
!!! WARNING
- The location data is not longer stored in `/status/nodes`. Any tooling that relies on this will need to be updated.
\ No newline at end of file
+    The location data is no longer stored in `/status/nodes`. Any tooling that relies on this will need to be updated.
diff --git a/docs/kubectl.md b/docs/kubectl.md
index 38521a486503..d9f54b15b90b 100644
--- a/docs/kubectl.md
+++ b/docs/kubectl.md
@@ -1,14 +1,13 @@
-# Kubectl
+# `kubectl`
You can also create Workflows directly with `kubectl`. However, the Argo CLI offers extra features
that `kubectl` does not, such as YAML validation, workflow visualization, parameter passing, retries
and resubmits, suspend and resume, and more.
-```sh
+```bash
kubectl create -n argo -f https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-world.yaml
kubectl get wf -n argo
kubectl get wf hello-world-xxx -n argo
kubectl get po -n argo --selector=workflows.argoproj.io/workflow=hello-world-xxx
kubectl logs hello-world-yyy -c main -n argo
```
-
diff --git a/docs/lifecyclehook.md b/docs/lifecyclehook.md
index e555e568b3b4..2da5f0f93111 100644
--- a/docs/lifecyclehook.md
+++ b/docs/lifecyclehook.md
@@ -1,4 +1,4 @@
-# LifecycleHook
+# Lifecycle-Hook
> v3.3 and after
@@ -9,16 +9,18 @@ A [`LifecycleHook`](https://argoproj.github.io/argo-workflows/fields/#lifecycleh
In other words, a `LifecycleHook` functions like an [exit handler](https://github.com/argoproj/argo-workflows/blob/master/examples/exit-handlers.yaml) with a conditional expression.
**Workflow-level `LifecycleHook`**: Executes the workflow when a configured expression is met.
-- [Workflow-level LifecycleHook example](https://github.com/argoproj/argo-workflows/blob/45730a9cdeb588d0e52b1ac87b6e0ca391a95a81/examples/life-cycle-hooks-wf-level.yaml)
-**Template-level LifecycleHook**: Executes the template when a configured expression is met.
-- [Template-level LifecycleHook example](https://github.com/argoproj/argo-workflows/blob/45730a9cdeb588d0e52b1ac87b6e0ca391a95a81/examples/life-cycle-hooks-tmpl-level.yaml)
+- [Workflow-level Lifecycle-Hook example](https://github.com/argoproj/argo-workflows/blob/45730a9cdeb588d0e52b1ac87b6e0ca391a95a81/examples/life-cycle-hooks-wf-level.yaml)
+
+**Template-level Lifecycle-Hook**: Executes the template when a configured expression is met.
+
+- [Template-level Lifecycle-Hook example](https://github.com/argoproj/argo-workflows/blob/45730a9cdeb588d0e52b1ac87b6e0ca391a95a81/examples/life-cycle-hooks-tmpl-level.yaml)
## Supported conditions
- [Exit handler variables](https://github.com/argoproj/argo-workflows/blob/ebd3677c7a9c973b22fa81ef3b409404a38ec331/docs/variables.md#exit-handler): `workflow.status` and `workflow.failures`
- [`template`](https://argoproj.github.io/argo-workflows/fields/#template)
-- [`templateRef`](https://argoproj.github.io/argo-workflows/fields/#templateref)
+- [`templateRef`](https://argoproj.github.io/argo-workflows/fields/#templateref)
- [`arguments`](https://github.com/argoproj/argo-workflows/blob/master/examples/conditionals.yaml)
## Unsupported conditions
diff --git a/docs/links.md b/docs/links.md
index dfec1e82ecfa..08b0574ce6fa 100644
--- a/docs/links.md
+++ b/docs/links.md
@@ -1,33 +1,31 @@
# Links
-
-
> v2.7 and after
You can configure Argo Server to show custom links:
-* A "Get Help" button in the bottom right of the window linking to you organisation help pages or chat room.
-* Deep-links to your facilities (e.g. logging facility) in the user interface for both the workflow and each workflow pod.
+* A "Get Help" button in the bottom right of the window linking to your organization's help pages or chat room.
+* Deep-links to your facilities (e.g. logging facility) in the UI for both the workflow and each workflow pod.
Links can contain placeholder variables. Placeholder variables are indicated by the dollar sign and curly braces: `${variable}`.
These are the commonly used variables:
-- `${metadata.namespace}`: Kubernetes namespace of the current workflow / pod / event source / sensor
-- `${metadata.name}`: Name of the current workflow / pod / event source / sensor
-- `${status.startedAt}`: Start timestamp of the workflow / pod, in the format of `2021-01-01T10:35:56Z`
-- `${status.finishedAt}`: End timestamp of the workflow / pod, in the format of `2021-01-01T10:35:56Z`. If the workflow/pod is still running, this variable will be `null`
+* `${metadata.namespace}`: Kubernetes namespace of the current workflow / pod / event source / sensor
+* `${metadata.name}`: Name of the current workflow / pod / event source / sensor
+* `${status.startedAt}`: Start time-stamp of the workflow / pod, in the format of `2021-01-01T10:35:56Z`
+* `${status.finishedAt}`: End time-stamp of the workflow / pod, in the format of `2021-01-01T10:35:56Z`. If the workflow/pod is still running, this variable will be `null`
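+
+As a sketch, a link in the `workflow-controller-configmap` might look like the following (the link name and URL are placeholders; see the complete example linked below for the exact schema):
+
+```yaml
+links:
+  - name: Example Workflow Logs
+    scope: workflow
+    url: https://logging.example.com/?namespace=${metadata.namespace}&workflowName=${metadata.name}
+```
+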
See [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml) for a complete example
> v3.1 and after
-Epoch timestamps are available now. These are useful if we want to add links to logging facilities like [Grafana](https://grafana.com/)
-or [DataDog](https://datadoghq.com/), as they support Unix epoch timestamp formats as URL
+Epoch time-stamps are available now. These are useful if we want to add links to logging facilities like [Grafana](https://grafana.com/)
+or [DataDog](https://datadoghq.com/), as they support Unix epoch time-stamp formats as URL
parameters:
-- `${status.startedAtEpoch}`: Start timestamp of the workflow/pod, in the Unix epoch time format in **milliseconds**, e.g. `1609497000000`.
-- `${status.finishedAtEpoch}`: End timestamp of the workflow/pod, in the Unix epoch time format in **milliseconds**, e.g. `1609497000000`. If the workflow/pod is still running, this variable will represent the currnet time.
+* `${status.startedAtEpoch}`: Start time-stamp of the workflow/pod, in the Unix epoch time format in **milliseconds**, e.g. `1609497000000`.
+* `${status.finishedAtEpoch}`: End time-stamp of the workflow/pod, in the Unix epoch time format in **milliseconds**, e.g. `1609497000000`. If the workflow/pod is still running, this variable will represent the current time.
> v3.1 and after
diff --git a/docs/managed-namespace.md b/docs/managed-namespace.md
index 202bbca8e38b..6c892e0f0fbd 100644
--- a/docs/managed-namespace.md
+++ b/docs/managed-namespace.md
@@ -1,20 +1,18 @@
# Managed Namespace
-
-
> v2.5 and after
You can install Argo in either cluster-scoped or namespace-scoped configurations.
This dictates whether you must set up cluster roles or normal roles.
In the namespace-scoped configuration, you must run both the Workflow Controller and
-Argo Server using `--namespaced`. If you would like to have the workflows running in a separate
-namespace, add `--managed-namespace` as well. (In cluster scope installation, don't include `--namespaced`
+Argo Server using `--namespaced`. If you would like to have the workflows running in a separate
+namespace, add `--managed-namespace` as well. (In cluster scope installation, don't include `--namespaced`
or `--managed-namespace`.)
For example:
-```
+```yaml
- args:
- --configmap
- workflow-controller-configmap
@@ -25,4 +23,4 @@ For example:
- default
```
-Please mind that both cluster scoped and namespace scoped configurations require "admin" role because some custom resource (CRD) must be created (and CRD is always a cluster level object)
+Please note that both cluster-scoped and namespace-scoped configurations require the "admin" role, because a custom resource definition (CRD) must be created (and CRDs are always cluster-level objects).
diff --git a/docs/memoization.md b/docs/memoization.md
index 81a473493d20..8945013433d1 100644
--- a/docs/memoization.md
+++ b/docs/memoization.md
@@ -4,44 +4,42 @@
## Introduction
-Workflows often have outputs that are expensive to compute.
-This feature reduces cost and workflow execution time by memoizing previously run steps:
+Workflows often have outputs that are expensive to compute.
+This feature reduces cost and workflow execution time by memoizing previously run steps:
it stores the outputs of a template into a specified cache with a variable key.
## Cache Method
-Currently, caching can only be performed with ConfigMaps.
+Currently, caching can only be performed with config-maps.
This allows you to easily manipulate cache entries manually through `kubectl` and the Kubernetes API without having to go through Argo.
-## Using Memoization
+## Using Memoization
-Memoization is set at the template level. You must specify a key, which can be static strings but more often depend on inputs.
-You must also specify a name for the ConfigMap cache.
+Memoization is set at the template level. You must specify a `key`, which can be a static string but more often depends on inputs.
+You must also specify a name for the config-map cache.
-```
+```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
- generateName: memoized-workflow-
+ generateName: memoized-workflow-
spec:
- entrypoint: whalesay
- templates:
- - name: whalesay
- memoize:
- key: "{{inputs.parameters.message}}"
- cache:
- configMap:
- name: whalesay-cache
-
-...
+ entrypoint: whalesay
+ templates:
+ - name: whalesay
+ memoize:
+ key: "{{inputs.parameters.message}}"
+ cache:
+ configMap:
+ name: whalesay-cache
```
-!!! Note
+!!! Note
In order to use memoization it is necessary to add the verbs `create` and `update` to the `configmaps` resource for the appropriate (cluster) roles. In the case of a cluster install the `argo-cluster-role` cluster role should be updated, whilst for a namespace install the `argo-role` role should be updated.
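+
+As a sketch, the extra rule for the appropriate role (`argo-role` for a namespace install, `argo-cluster-role` for a cluster install) might look like this:
+
+```yaml
+# additional rule granting the verbs required for memoization caches
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: ["create", "update"]
+```
+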
-## FAQs
+## FAQ
-1. If you see errors like `"error creating cache entry: ConfigMap \"reuse-task\" is invalid: []: Too long: must have at most 1048576 characters"`,
+1. If you see errors like `error creating cache entry: ConfigMap \"reuse-task\" is invalid: []: Too long: must have at most 1048576 characters`,
this is due to [the 1MB limit placed on the size of `ConfigMap`](https://github.com/kubernetes/kubernetes/issues/19781).
Here are a couple of ways that might help resolve this:
* Delete the existing `ConfigMap` cache or switch to use a different cache.
diff --git a/docs/mentoring.md b/docs/mentoring.md
index 37a7c0f78797..a8db6cb00fb7 100644
--- a/docs/mentoring.md
+++ b/docs/mentoring.md
@@ -9,7 +9,7 @@ Mentors will help you with things like:
* Understand key concepts and learn your way around the source code
* Getting your first pull request with a code contribution created, reviewed and merged.
-Mentors and mentees must abide by the [code of conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Mentors and mentees must abide by the [code of conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
## How To Get A Mentor
@@ -19,9 +19,9 @@ To set expectations:
1. Create [a mentoring request](https://github.com/argoproj/argo-workflows/issues/new?assignees=&labels=mentoring%2Ctriage&template=mentoring_request.md) with the title "I would like a mentor" and answer the questions in the template.
2. A mentor will comment offering to mentor you. It may take a few days for a mentor to respond.
-3. You comment to accept or decline the offer.
+3. You comment to accept or decline the offer.
4. If you accept, then your new mentor will assign the issue to themself.
-5. The mentor may suggest resources to learn Golang or React, or discus the changes that'll fix your bug or enhancement needs.
+5. The mentor may suggest resources to learn Golang or React, or discuss the changes that will fix your bug or enhancement needs.
6. Work together to create, approve, and merge your pull request.
7. Once the PR is closed, the issue is closed. The relationship ends. Congratulations!
@@ -57,4 +57,4 @@ For program specific questions, please refer to [GSoC FAQ](https://developers.go
If you have any problems with your relationship, please contact us:
* alex_collins@intuit.com
-* terrytangyuan@gmail.com
\ No newline at end of file
+* terrytangyuan@gmail.com
diff --git a/docs/metrics.md b/docs/metrics.md
index a8817bdb1274..69c23d778a79 100644
--- a/docs/metrics.md
+++ b/docs/metrics.md
@@ -1,7 +1,5 @@
# Prometheus Metrics
-
-
> v2.7 and after
## Introduction
@@ -9,7 +7,7 @@
Argo emits a certain number of controller metrics that inform on the state of the controller at any given time. Furthermore,
users can also define their own custom metrics to inform on the state of their Workflows.
-Custom prometheus metrics can be defined to be emitted on a `Workflow`- and `Template`-level basis. These can be useful
+Custom Prometheus metrics can be defined to be emitted on a `Workflow`- and `Template`-level basis. These can be useful
for many cases; some examples:
- Keeping track of the duration of a `Workflow` or `Template` over time, and setting an alert if it goes beyond a threshold
@@ -23,12 +21,13 @@ best way to define metrics in Argo to avoid problems such as [cardinality explos
There are two kinds of metrics emitted by Argo: **controller metrics** and **custom metrics**.
-#### Controller metrics
+### Controller metrics
+
Metrics that inform on the state of the controller; i.e., they answer the question "What is the state of the controller right now?"
Default controller metrics can be scraped from service `workflow-controller-metrics` at the endpoint `:9090/metrics`
-
-#### Custom metrics
+### Custom metrics
+
Metrics that inform on the state of a Workflow, or a series of Workflows. These custom metrics are defined by the user in the Workflow spec.
Emitting custom metrics is the responsibility of the emitter owner. Since the user defines Workflows in Argo, the user is responsible
@@ -63,61 +62,61 @@ a way to view and analyze historical data, consider the [workflow archive](workf
Metrics for the Four Golden Signals are:
-* Latency: `argo_workflows_queue_latency`
-* Traffic: `argo_workflows_count` and `argo_workflows_queue_depth_count`
-* Errors: `argo_workflows_count` and `argo_workflows_error_count`
-* Saturation: `argo_workflows_workers_busy` and `argo_workflows_workflow_condition`
+- Latency: `argo_workflows_queue_latency`
+- Traffic: `argo_workflows_count` and `argo_workflows_queue_depth_count`
+- Errors: `argo_workflows_count` and `argo_workflows_error_count`
+- Saturation: `argo_workflows_workers_busy` and `argo_workflows_workflow_condition`
-#### argo_pod_missing
+#### `argo_pod_missing`
Pods were not seen. E.g. by being deleted by Kubernetes. You should only see this under high load.
!!! NOTE
This metric's name starts with `argo_` not `argo_workflows_`.
-#### argo_workflows_count
+#### `argo_workflows_count`
Number of workflows in each phase. The `Running` count does not mean that a workflow's pods are running, just that the controller has scheduled them. A workflow can be stuck in `Running` with pending pods for a long time.
-#### argo_workflows_error_count
+#### `argo_workflows_error_count`
A count of certain errors incurred by the controller.
-#### argo_workflows_k8s_request_total
+#### `argo_workflows_k8s_request_total`
Number of API requests sent to the Kubernetes API.
-#### argo_workflows_operation_duration_seconds
+#### `argo_workflows_operation_duration_seconds`
A histogram of durations of operations.
-#### argo_workflows_pods_count
+#### `argo_workflows_pods_count`
-It is possible for a workflow to start, but no pods be running (e.g. cluster is too busy to run them). This metric sheds light on actual work being done.
+It is possible for a workflow to start, but no pods to be running (e.g. the cluster is too busy to run them). This metric sheds light on actual work being done.
-#### argo_workflows_queue_adds_count
+#### `argo_workflows_queue_adds_count`
The number of additions to the queue of workflows or cron workflows.
-#### argo_workflows_queue_depth_count
+#### `argo_workflows_queue_depth_count`
The depth of the queue of workflows or cron workflows to be processed by the controller.
-#### argo_workflows_queue_latency
+#### `argo_workflows_queue_latency`
The time workflows or cron workflows spend in the queue waiting to be processed.
-#### argo_workflows_workers_busy
+#### `argo_workflows_workers_busy`
The number of workers that are busy.
-#### argo_workflows_workflow_condition
+#### `argo_workflows_workflow_condition`
The number of workflows with different conditions. This will tell you the number of workflows with running pods.
-#### argo_workflows_workflows_processed_count
+#### `argo_workflows_workflows_processed_count`
A count of all Workflow updates processed by the controller.
@@ -131,7 +130,7 @@ In order to analyze the behavior of a workflow over time, we need to be able to
(i.e. individual executions) of a workflow together into a "series" for the purposes of emitting metrics. We do so by linking them together
with the same metric descriptor.
-In prometheus, a metric descriptor is defined as a metric's name and its key-value labels. For example, for a metric
+In Prometheus, a metric descriptor is defined as a metric's name and its key-value labels. For example, for a metric
tracking the duration of model execution over time, a metric descriptor could be:
`argo_workflows_model_exec_time{model_name="model_a",phase="validation"}`
@@ -155,7 +154,7 @@ Please see the [Argo Workflows metrics](https://grafana.com/grafana/dashboards/1
## Defining metrics
Metrics are defined in-place on the Workflow/Step/Task where they are emitted from. Metrics are always processed _after_
-the Workflow/Step/Task completes, with the exception of [realtime metrics](#realtime-metrics).
+the Workflow/Step/Task completes, with the exception of [real-time metrics](#real-time-metrics).
Metric definitions **must** include a `name` and a `help` doc string. They can also include any number of `labels` (when
defining labels avoid cardinality explosion). Metrics with the same `name` **must always** use the same exact `help` string,
@@ -279,16 +278,15 @@ Finally, an example of a `Template`-level Histogram metric that tracks an intern
...
```
-### Realtime metrics
+### Real-Time Metrics
-Argo supports a limited number of real-time metrics. These metrics are emitted in realtime, beginning when the step execution starts
-and ending when it completes. Realtime metrics are only available on Gauge type metrics and with a [limited number of variables](variables.md#realtime-metrics).
+Argo supports a limited number of real-time metrics. These metrics are emitted in real-time, beginning when the step execution starts
+and ending when it completes. Real-time metrics are only available on Gauge type metrics and with a [limited number of variables](variables.md#realtime-metrics).
-To define a realtime metric simply add `realtime: true` to a gauge metric with a valid realtime variable. For example:
+To define a real-time metric simply add `realtime: true` to a gauge metric with a valid real-time variable. For example:
```yaml
gauge:
realtime: true
value: "{{duration}}"
```
-
diff --git a/docs/node-field-selector.md b/docs/node-field-selector.md
index 086f5cf04e2c..b1f11296b978 100644
--- a/docs/node-field-selector.md
+++ b/docs/node-field-selector.md
@@ -1,12 +1,10 @@
# Node Field Selectors
-
-
> v2.8 and after
## Introduction
-The resume, stop and retry Argo CLI and API commands support a `--node-field-selector` parameter to allow the user to select a subset of nodes for the command to apply to.
+The resume, stop and retry Argo CLI and API commands support a `--node-field-selector` parameter to allow the user to select a subset of nodes for the command to apply to.
In the case of the resume and stop commands these are the nodes that should be resumed or stopped.
@@ -14,7 +12,9 @@ In the case of the retry command it allows specifying nodes that should be resta
The format of this when used with the CLI is:
-```--node-field-selector=FIELD=VALUE```
+```bash
+--node-field-selector=FIELD=VALUE
+```
## Possible options
@@ -25,26 +25,30 @@ The field can be any of:
| `displayName`| Display name of the node. This is the name of the node as it is displayed on the CLI or UI, without considering its ancestors (see example below). This is a useful shortcut if there is only one node with the same `displayName` |
| `name`| Full name of the node. This is the full name of the node, including its ancestors (see example below). Using `name` is necessary when two or more nodes share the same `displayName` and disambiguation is required. |
| `templateName`| Template name of the node |
-| `phase`| Phase status of the node - eg Running |
-| `templateRef.name`| The name of the WorkflowTemplate the node is referring to |
-| `templateRef.template`| The template within the WorkflowTemplate the node is referring to |
+| `phase`| Phase status of the node - e.g. Running |
+| `templateRef.name`| The name of the workflow template the node is referring to |
+| `templateRef.template`| The template within the workflow template the node is referring to |
| `inputs.parameters.NAME.value`| The value of input parameter NAME |
-The operator can be '=' or '!='. Multiple selectors can be combined with a comma, in which case they are ANDed together.
+The operator can be `=` or `!=`. Multiple selectors can be combined with a comma, in which case all of them must match (logical `AND`).
## Examples
To filter for nodes where the input parameter 'foo' is equal to 'bar':
-```--node-field-selector=inputs.parameters.foo.value=bar```
+```bash
+--node-field-selector=inputs.parameters.foo.value=bar
+```
To filter for nodes where the input parameter 'foo' is equal to 'bar' and phase is not running:
-```--node-field-selector=foo1=bar1,phase!=Running```
+```bash
+--node-field-selector=foo1=bar1,phase!=Running
+```
Consider the following workflow:
-```
+```text
● appr-promotion-ffsv4 code-release
├─✔ start sample-template/email appr-promotion-ffsv4-3704914002 2s
├─● app1 wftempl1/approval-and-promotion
@@ -63,7 +67,7 @@ Here we have two steps with the same `displayName`: `wait-approval`. To select o
`name`, either `appr-promotion-ffsv4.app1.wait-approval` or `appr-promotion-ffsv4.app3.wait-approval`. If it is not clear
what the full name of a node is, it can be found using `kubectl`:
-```
+```bash
$ kubectl get wf appr-promotion-ffsv4 -o yaml
...
diff --git a/docs/offloading-large-workflows.md b/docs/offloading-large-workflows.md
index 0180e4ee68cf..2111be6711b6 100644
--- a/docs/offloading-large-workflows.md
+++ b/docs/offloading-large-workflows.md
@@ -1,30 +1,28 @@
# Offloading Large Workflows
-
-
> v2.4 and after
-Argo stores workflows as Kubernetes resources (i.e. within EtcD). This creates a limit to their size as resources must be under 1MB. Each resource includes the status of each node, which is stored in the `/status/nodes` field for the resource. This can be over 1MB. If this happens, we try and compress the node status and store it in `/status/compressedNodes`. If the status is still too large, we then try and store it in an SQL database.
+Argo stores workflows as Kubernetes resources (i.e. within etcd). This creates a limit to their size, as resources must be under 1MB. Each resource includes the status of each node, which is stored in the `/status/nodes` field for the resource. This can be over 1MB. If this happens, we try to compress the node status and store it in `/status/compressedNodes`. If the status is still too large, we then try to store it in an SQL database.
To enable this feature, configure a Postgres or MySQL database under `persistence` in [your configuration](workflow-controller-configmap.yaml) and set `nodeStatusOffLoad: true`.
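+
+For example, a sketch of a Postgres-backed `persistence` section might look like this (the host, database, table and secret names are placeholders; check the config-map reference above for the exact fields):
+
+```yaml
+persistence:
+  nodeStatusOffLoad: true
+  postgresql:
+    host: postgres.argo.svc.cluster.local
+    port: 5432
+    database: argo
+    tableName: argo_workflows
+    userNameSecret:
+      name: argo-postgres-config
+      key: username
+    passwordSecret:
+      name: argo-postgres-config
+      key: password
+```
+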
## FAQ
-#### Why aren't my workflows appearing in the database?
+### Why aren't my workflows appearing in the database?
Offloading is expensive and often unnecessary, so we only offload when we need to. Your workflows probably aren't large enough.
-#### Error "Failed to submit workflow: etcdserver: request is too large."
+### Error `Failed to submit workflow: etcdserver: request is too large.`
You must use the Argo CLI, having first exported `ARGO_SERVER=...`.
-#### Error "offload node status is not supported"
+### Error `offload node status is not supported`
Even after compressing node statuses, the workflow exceeded the etcd
size limit. To resolve, either enable node status offload as described
above or look for ways to reduce the size of your workflow manifest:
-- Use `withItems` or `withParams` to consolidate similar templates into a single parameterized template
+- Use `withItems` or `withParams` to consolidate similar templates into a single parametrized template
- Use [template defaults](https://argoproj.github.io/argo-workflows/template-defaults/) to factor shared template options to the workflow level
- Use [workflow templates](https://argoproj.github.io/argo-workflows/workflow-templates/) to factor frequently-used templates into separate resources
- Use [workflows of workflows](https://argoproj.github.io/argo-workflows/workflow-of-workflows/) to factor a large workflow into a workflow of smaller workflows
diff --git a/docs/overrides/main.html b/docs/overrides/main.html
new file mode 100644
index 000000000000..ecea080f7e92
--- /dev/null
+++ b/docs/overrides/main.html
@@ -0,0 +1,24 @@
+{% extends "base.html" %}
+{% block content %}
+{{ super() }}
+
+{{ lang.t("meta.comments") }}
+
+{% endblock %}
\ No newline at end of file
diff --git a/docs/plugin-directory.md b/docs/plugin-directory.md
index dbc184a96e93..67a4a0c32e96 100644
--- a/docs/plugin-directory.md
+++ b/docs/plugin-directory.md
@@ -11,3 +11,4 @@
| [Python](https://github.com/argoproj-labs/argo-workflows-python-executor-plugin) | Plugin for executing Python |
| [Hermes](https://github.com/kjagiello/hermes) | Send notifications, e.g. Slack |
| [WASM](https://github.com/Shark/wasm-workflows-plugin) | Run Web Assembly (WASM) tasks |
+| [Chaos Mesh Plugin](https://github.com/xlgao-zju/argo-chaos-mesh-plugin) | Run Chaos Mesh experiment |
diff --git a/docs/progress.md b/docs/progress.md
index f94cbb81a71d..449e6aa7f75f 100644
--- a/docs/progress.md
+++ b/docs/progress.md
@@ -4,37 +4,38 @@
When you run a workflow, the controller will report on its progress.
-We define progress as two numbers, `N/M` such that `0 <= N <= M and 0 <= M`.
+We define progress as two numbers, `N/M`, such that `0 <= N <= M` and `0 <= M`.
* `N` is the number of completed tasks.
* `M` is the total number of tasks.
E.g. `0/0`, `0/1` or `50/100`.
-Unlike [estimated duration](estimated-duration.md), progress is deterministic. I.e. it will be the same for each workflow, regardless of any problems.
+Unlike [estimated duration](estimated-duration.md), progress is deterministic. I.e. it will be the same for each workflow, regardless of any problems.
Progress for each node is calculated as follows:
-2. For a pod node either `1/1` if completed or `0/1` otherwise.
-3. For non-leaf nodes, the sum of its children.
+1. For a pod node either `1/1` if completed or `0/1` otherwise.
+2. For non-leaf nodes, the sum of its children.
For a whole workflow, progress is the sum of the progress of all its leaf nodes.
-
-!!! Warning
+
+!!! Warning
`M` will increase during workflow run each time a node is added to the graph.
## Self reporting progress
> v3.3 and after
-Pods in a workflow can report their own progress during their runtime. This self reported progress overrides the
-auto-generated progress.
+Pods in a workflow can report their own progress during their runtime. This self-reported progress overrides the
+auto-generated progress.
Reporting progress works as follows:
-- create and write the progress to a file indicated by the env variable `ARGO_PROGRESS_FILE`
-- format of the progress must be `N/M`
-The executor will read this file every 3s and if there was an update,
+* Create and write the progress to the file indicated by the environment variable `ARGO_PROGRESS_FILE`.
+* The format of the progress must be `N/M`.
+
+The executor will read this file every 3s and if there was an update,
patch the pod annotations with `workflows.argoproj.io/progress: N/M`.
The controller picks this up and writes the progress to the appropriate Status properties.
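+
+As a sketch, a template could self-report progress like this (the image is the same one used elsewhere in these docs; the progress values are only illustrative):
+
+```yaml
+- name: progress-example
+  container:
+    image: 'argoproj/argosay:v2'
+    command:
+      - sh
+      - '-c'
+    args:
+      - |
+        echo "0/100" > "$ARGO_PROGRESS_FILE"    # just starting
+        sleep 30
+        echo "100/100" > "$ARGO_PROGRESS_FILE"  # done
+```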
diff --git a/docs/proposals/artifact-gc-proposal.md b/docs/proposals/artifact-gc-proposal.md
new file mode 100644
index 000000000000..62c16acd33a2
--- /dev/null
+++ b/docs/proposals/artifact-gc-proposal.md
@@ -0,0 +1,88 @@
+# Proposal for Artifact Garbage Collection
+
+## Introduction
+
+The motivation for this is to enable users to specify that certain Artifacts should be automatically garbage collected.
+
+Artifacts can be specified for Garbage Collection at different stages: `OnWorkflowCompletion`, `OnWorkflowDeletion`, `OnWorkflowSuccess`, `OnWorkflowFailure`, or `Never`
+
+## Proposal Specifics
+
+### Workflow Spec changes
+
+1. `WorkflowSpec` has an `ArtifactGC` structure, which consists of an `ArtifactGCStrategy`, as well as the optional designation of a `ServiceAccount` and Pod metadata (labels and annotations) to be used by the Pod doing the deletion. The `ArtifactGCStrategy` can be set to `OnWorkflowCompletion`, `OnWorkflowDeletion`, `OnWorkflowSuccess`, `OnWorkflowFailure`, or `Never`
+2. Artifact has an `ArtifactGC` section which can be used to override the Workflow level.
+
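+As a hypothetical sketch of how this could look in a Workflow spec (field names follow the proposal and are illustrative, not a finalized API):
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: artifact-gc-
+spec:
+  artifactGC:                    # Workflow-level default
+    strategy: OnWorkflowDeletion
+    serviceAccountName: artifact-gc-sa
+    podMetadata:
+      labels:
+        app: artifact-gc
+  templates:
+    - name: main
+      container:
+        image: argoproj/argosay:v2
+      outputs:
+        artifacts:
+          - name: result
+            path: /tmp/result.txt
+            s3:
+              key: "{{workflow.uid}}/result.txt"
+            artifactGC:          # Artifact-level override
+              strategy: OnWorkflowCompletion
+```
+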
+### Workflow Status changes
+
+1. Artifact has a boolean `Deleted` flag
+2. `WorkflowStatus.Conditions` can be set to `ArtifactGCError`
+3. `WorkflowStatus` can include a new field `ArtGCStatus` which holds additional information to keep track of the state of Artifact Garbage Collection.
+
+### How it will work
+
+For each `ArtifactGCStrategy` the Controller will execute one Pod that runs in the user's namespace and deletes all artifacts pertaining to that strategy.
+
+
+
+Since `OnWorkflowSuccess` happens at the same time as `OnWorkflowCompletion` and `OnWorkflowFailure` also happens at the same time as `OnWorkflowCompletion`, we can consider consolidating these GC Strategies together.
+
+We will have a new CRD type called `ArtifactGCTask` and use one or more of them to specify the Artifacts which the GC Pod will read and then write Status to (note individual artifacts have individual statuses). The Controller will read the Status and reflect that in the Workflow Status. The Controller will deem the `ArtifactGCTasks` ready to read once the Pod has completed (in success or failure).
+
+Once the GC Pod has completed and the Workflow status has been persisted, assuming the Pod completed with Success, the Controller can delete the `ArtifactGCTasks`, which will cause the GC Pod to also get deleted as it will be "owned" by the `ArtifactGCTasks`.
+
+The Workflow will have a Finalizer on it to prevent it from being deleted until Artifact GC has occurred. Once all deletions for all GC Strategies have occurred, the Controller will remove the Finalizer.
+
+### Failures
+
+If a deletion fails, the Pod will retry a few times with exponential back-off. Note: it will not be considered a failure if the key does not exist - the principle of idempotence allows this (i.e. if a Pod were to get evicted and then re-run, it should be okay if some artifacts were previously deleted).
+
+If it still has not succeeded after retrying a few times, it will end in a "Failed" state. The user will then need to manually delete the `ArtifactGCTasks` (which will delete the GC Pod) and remove the Finalizer on the Workflow.
+
+The failure will be reflected in both the Workflow Conditions and a Kubernetes Event (and the Artifacts that failed will have `Deleted`=false).
+
+### Alternatives Considered
+
+For reference, these [slides](../assets/artifact-gc-proposal.pptx) were presented to the Argo Contributor meeting on 7/12/22 which go through some of the alternative options that were weighed. These alternatives are explained below:
+
+#### One Pod Per Artifact
+
+The [POC](https://github.com/argoproj/argo-workflows/pull/8530) that was done, which uses just one Pod to delete each Artifact, was considered as an alternative for MVP (Option 1 from the slides).
+
+This option has these benefits:
+
+- simpler in that the Pod doesn't require any additional Object to report status (e.g. `ArtifactGCTask`) because it simply succeeds or fails based on its exit code (whereas in Option 2 the Pod needs to report individual failure statuses for each artifact)
+- could have a very minimal Service Account which provides access to just that one artifact's location
+
+and these drawbacks:
+
+- deletion is slower when performed by multiple Pods
+- a Workflow with thousands of artifacts causes thousands of Pods to get executed, which could overwhelm kube-scheduler and kube-apiserver.
+- if we delay the Artifact GC Pods by giving them a lower priority than the Workflow Pods, users will not get their artifacts deleted when they expect and may log bugs
+
+Summarizing ADR statement:
+"In the context of Artifact Garbage Collection, facing whether to use a separate Pod for every artifact or not, we decided not to, to achieve faster garbage collection and reduced load on K8S, accepting that we will require a new CRD type."
+
+#### Service Account/IAM roles
+
+We considered some alternatives for how to specify Service Account and/or Annotations, which are applied to give the GC Pod access (slide 12). We will have them specify this information in a new `ArtifactGC` section of the spec that lives on the Workflow level but can be overridden on the Artifact level (option 3 from slide). Another option considered was to just allow specification on the Workflow level (option 2 from slide) so as to reduce the complexity of the code and reduce the potential number of Pods running, but Option 3 was selected in the end to maximize flexibility.
+
+Summarizing ADR statement:
+"In the context of Artifact Garbage Collection, facing the question of how users should specify Service Account and annotations, we decided to give them the option to specify them on the Workflow level and/or override them on the Artifact level, to maximize flexibility for user needs, accepting that the code will be more complicated, and sometimes there will be many Pods running."
+
+### MVP vs post-MVP
+
+We will start with just S3.
+
+We can also make other determinations if it makes sense to postpone some parts for after MVP.
+
+### Workflow Spec Validation
+
+We can reject the Workflow during validation if `ArtifactGC` is configured along with a non-supported storage engine (for now probably anything besides S3).
+
+### Documentation
+
+Need to clarify certain things in our documentation:
+
+1. Users need to know that if they don't name their artifacts with unique keys, they risk the same key being deleted by one Workflow and created by another at the same time. One recommendation is to parametrize the key, e.g. `{{workflow.uid}}/hello.txt`.
+2. Requirement to specify Service Account or Annotation for `ArtifactGC` specifically if they are needed (we won't fall back to default Workflow SA/annotations). Also, the Service Account needs to either be bound to the "agent" role or otherwise allow the same access to `ArtifactGCTasks`.
diff --git a/docs/public-api.md b/docs/public-api.md
index dc4956ec233a..c7b1b63d74dc 100644
--- a/docs/public-api.md
+++ b/docs/public-api.md
@@ -5,7 +5,3 @@ Argo Workflows public API is defined by the following:
* The file `api/openapi-spec/swagger.json`
* The schema of the table `argo_archived_workflows`.
* The installation options listed in `manifests/README.md`.
-
-See:
-
-* [Versioning](versioning.md)
\ No newline at end of file
diff --git a/docs/quick-start.md b/docs/quick-start.md
index 59e78a71aef1..43c6d7ccffc0 100644
--- a/docs/quick-start.md
+++ b/docs/quick-start.md
@@ -1,67 +1,105 @@
# Quick Start
-To see how Argo Workflows work, you can install it and run examples of simple workflows and workflows that use artifacts.
+To see how Argo Workflows works, you can install it and run examples of simple workflows.
-Firstly, you'll need a Kubernetes cluster and `kubectl` set-up
+Before you start, you need a Kubernetes cluster and `kubectl` set up to be able to access that cluster. For the purposes of getting up and running, a local cluster is fine. You could consider the following local Kubernetes cluster options:
+
+* [minikube](https://minikube.sigs.k8s.io/docs/)
+* [kind](https://kind.sigs.k8s.io/)
+* [k3s](https://k3s.io/) or [k3d](https://k3d.io/)
+* [Docker Desktop](https://www.docker.com/products/docker-desktop/)
+
+⚠️ These instructions are intended to help you get started quickly. They are not suitable for production. For production installs, please refer to [the installation documentation](installation.md). ⚠️
## Install Argo Workflows
-To get started quickly, you can use the quick start manifest which will install Argo Workflow as well as some commonly used components:
+To install Argo Workflows, navigate to the [releases page](https://github.com/argoproj/argo-workflows/releases/latest) and find the release you wish to use (the latest full release is preferred).
+
+Scroll down to the `Controller and Server` section and execute the `kubectl` commands.
-!!! note
- These manifests are intended to help you get started quickly. They are not suitable in production, on test environments, or any environment containing any real data. They contain hard-coded passwords that are publicly available.
+Below is an example of the install commands; ensure that you update the command to install the correct version number:
-```sh
-kubectl create ns argo
-kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start-postgres.yaml
+```bash
+kubectl create namespace argo
+kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v<VERSION>/install.yaml
```
-!!! note
- On GKE, you may need to grant your account the ability to create new `clusterrole`s
+### Patch argo-server authentication
+
+The argo-server (and thus the UI) defaults to client authentication, which requires clients to provide their Kubernetes bearer token in order to authenticate. For more information, refer to the [Argo Server Auth Mode documentation](argo-server-auth-mode.md). We will switch the authentication mode to `server` so that we can bypass the UI login for now:
+
+```bash
+kubectl patch deployment \
+ argo-server \
+ --namespace argo \
+ --type='json' \
+ -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": [
+ "server",
+ "--auth-mode=server"
+]}]'
-```sh
-kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com
```
-!!! note
- To run Argo on GKE Autopilot, you must use the `emissary` executor or the `k8sapi` executor. Find more information on our [executors doc](workflow-executors.md).
+### Port-forward the UI
-If you are running Argo Workflows locally (e.g. using Minikube or Docker for Desktop), open a port-forward so you can access the namespace:
+Open a port-forward so you can access the UI:
-```sh
+```bash
kubectl -n argo port-forward deployment/argo-server 2746:2746
```
-This will serve the user interface on https://localhost:2746
+This will serve the UI on `https://localhost:2746`. Due to the self-signed certificate, you will receive a TLS error which you will need to manually approve.
-If you're using running Argo Workflows on a remote cluster (e.g. on EKS or GKE) then [follow these instructions](argo-server.md#access-the-argo-workflows-ui).
+## Install the Argo Workflows CLI
-Next, Download the latest Argo CLI from our [releases page](https://github.com/argoproj/argo-workflows/releases/latest).
+Next, download the latest Argo CLI from the same [releases page](https://github.com/argoproj/argo-workflows/releases/latest).
-Finally, submit an example workflow:
+## Submitting an example workflow
-`argo submit -n argo --watch https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-world.yaml`
+### Submit an example workflow (CLI)
-The `--watch` flag used above will allow you to observe the workflow as it runs and the status of whether it succeeds.
+```bash
+argo submit -n argo --watch https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-world.yaml
+```
+
+The `--watch` flag used above will allow you to observe the workflow as it runs and see whether it succeeds.
When the workflow completes, the watch on the workflow will stop.
You can list all the Workflows you have submitted by running the command below:
-`argo list -n argo`
+```bash
+argo list -n argo
+```
-You will notice the Workflow name has a `hello-world-` prefix followed by random characters. These characters are used
-to give Workflows unique names to help identify specific runs of a Workflow. If you submitted this Workflow again,
+You will notice the Workflow name has a `hello-world-` prefix followed by random characters. These characters are used
+to give Workflows unique names to help identify specific runs of a Workflow. If you submitted this Workflow again,
the next Workflow run would have a different name.
-Using the `argo get` command, you can always review details of a Workflow run. The output for the command below will
+Using the `argo get` command, you can always review details of a Workflow run. The output for the command below will
be the same as the information shown when you submitted the Workflow:
-`argo get -n argo @latest`
+```bash
+argo get -n argo @latest
+```
-The `@latest` argument to the CLI is a short cut to view the latest Workflow run that was executed.
+The `@latest` argument to the CLI is a shortcut to view the latest Workflow run that was executed.
You can also observe the logs of the Workflow run by running the following:
-`argo logs -n argo @latest`
+```bash
+argo logs -n argo @latest
+```
+
+### Submit an example workflow (GUI)
+
+* Open a port-forward so you can access the UI:
+
+```bash
+kubectl -n argo port-forward deployment/argo-server 2746:2746
+```
+
+* Navigate your browser to `https://localhost:2746`.
+
+* Click `+ Submit New Workflow` and then `Edit using full workflow options`
-Now that you have understanding of using Workflows, you can check out other [Workflow examples](https://github.com/argoproj/argo-workflows/blob/master/examples/README.md) to see additional uses of Worklows.
+* You can find an example workflow already in the text field. Press `+ Create` to start the workflow.
diff --git a/docs/releases.md b/docs/releases.md
index 3c3ec7b00e8d..b60b93457a7c 100644
--- a/docs/releases.md
+++ b/docs/releases.md
@@ -1,29 +1,37 @@
# Releases
-## Supported Versions
+You can find the most recent version under [GitHub Releases](https://github.com/argoproj/argo-workflows/releases).
+
+## Versioning
-Versions are expressed as x.y.z, where x is the major version, y is the minor version, and z is the patch version,
+Versions are expressed as `x.y.z`, where `x` is the major version, `y` is the minor version, and `z` is the patch version,
following Semantic Versioning terminology.
+Argo Workflows does not use Semantic Versioning. Minor versions may contain breaking changes. Patch versions only
+contain bug fixes and minor features.
+
+For **stable**, use the latest patch version.
+
+⚠️ Read the [upgrading guide](upgrading.md) to find out about breaking changes before any upgrade.
+
+## Supported Versions
+
We maintain release branches for the most recent two minor releases.
-Fixes may be backported to release branches, depending on severity, risk, and, feasibility.
+Fixes may be back-ported to release branches, depending on severity, risk, and feasibility.
-If a release contains breaking changes, or CVE fixes, this will documented in the release notes.
+If a release contains breaking changes or CVE fixes, this will be documented in the [upgrading guide](upgrading.md).
## Supported Version Skew
Both the `argo-server` and `argocli` should be the same version as the controller.
-# Release Cycle
-
-For **stable**, use the latest patch version.
-For **unstable**, we build and tag `latest` images for every commit to master.
+## Release Cycle
New minor versions are released roughly every 3 months. Release candidates for each major release are typically available
for 4-6 weeks before the release becomes generally available.
-Otherwise, we typically release weekly:
+Otherwise, we typically release every two weeks:
-* Patch fixes for the current stable version.
+* Patch fixes for the current stable version.
* The next release candidate, if we are currently in a release-cycle.
diff --git a/docs/releasing.md b/docs/releasing.md
index 60f3ca3f36d4..736d2cecd586 100644
--- a/docs/releasing.md
+++ b/docs/releasing.md
@@ -1,225 +1,39 @@
# Release Instructions
-## Release
+## Cherry-Picking Fixes
-### 1. Cherry-pick Issue
+✋ Before you start, make sure the release branch is passing CI.
-Create a cherry-pick issue to allow the team and community to comment on the release contents.
+Get a list of commits you may want to cherry-pick:
-1. Locate the previous cherry-pick issue
-2. Get the hash of the most recent commit still available on the previous issue
-3. Generate new issue contents:
-
- ```sh
- $ git checkout master # Ensure we are on master
- $ git log --pretty=format:"%an: %s %h" [COMMIT_HASH]..HEAD
- ```
-4. Create a new issue on GitHub with the title `[VERSION] cherry-pick` (e.g. `v3.0.2 cherry-pick`) and the generated commits
-as content.
-
-### 2. Cherry-pick to Release Branch
-
-Once the team and community is satisfied with the commits to be cherry-picked, cherry-pick them into the appropriate
-release branch. There should be a single release branch per minor release (e.g. `release-3.0`, `release-3.1`, etc.)
-
-1. Checkout the release branch and cherry-pick commits
-
- ```sh
- $ git checkout relesae-3.0
- $ git cherry-pick [COMMIT_IDS...]
- ```
-
-2. Hope for few merge conflicts!
-
- A merge conflict during cherry-picking usually means the commit is based on another commit that should be
- cherry-picked first. In case of a merge conflict, you can undo the cherry-picking by `git cherry-pick --abort` and
- revisit the list of commits to make sure the prior commits are cherry-picked as well.
-
-3. Once done cherry-picking, push the release branch to ensure the branch can build and all tests pass.
-
-### 3. Prepare the Release
-
-> v2
-
-`v2` releases still depend on the previous repository name (`github.com/argoproj/argo`). To release for `v2`,
-make a local clone of the repository under the name `argo`:
-
-```shell
-$ pwd
-/Users//go/src/github.com/argoproj/argo-workflows
-$ cd ..
-$ cp -r argo-workflows argo
-$ cd argo
-```
-
-Then follow all the normal steps. You should delete the `argo` folder once the release is done to avoid confusion and conflicts.
-
-#### Preparing the release
-
-> Before v3.1
-
-1. Releasing requires a clean tree state, so back-up any untracked files in your Git directory.
-
- **Only once your files are backed up**, run:
- ```shell
- $ git clean -fdx # WARNING: Will delete untracked files!
- ```
-
-2. To generate new manifests and perform basic checks:
-
- ```shell
- $ make prepare-release -B VERSION=v3.0.3
- ```
-
-3. Once done, push the release branch and ensure the branch is green and all tests pass.
-
- ```shell
- $ git push
- ```
-
-4. Publish the images and local Git changes (disabling K3D as this is faster and more reliable for releases):
-
- ```shell
- $ make publish-release K3D=false VERSION=v3.0.3
- ```
-
-5. Wait 1h to 2h.
-
-> v3.1 and after
-
-Create and push a release tag:
-
-```
-git tag v3.1.0
-git push origin v3.1.0
-```
-
-The release will then be done automatically by a Github action.
-
-### 4. Ensure the Release Succeeded
-
-> Before v3.1
-
-1. Check the images were pushed successfully. Ensure the `GitTreeState` is `Clean`.
- ```sh
- $ docker run argoproj/argoexec:v3.0.3 version
- $ docker run argoproj/workflow-controller:v3.0.3 version
- $ docker run argoproj/argocli:v3.0.3 version
- ```
-
-1. Check the correct versions are printed. Ensure the `GitTreeState` is `Clean`.
-
- ```sh
- $ ./dist/argo-darwin-amd64 version
- ```
-
-1. Check the manifests contain the correct tags (search for `v3.0.3`): [https://raw.githubusercontent.com/argoproj/argo-workflows/v3.0.3/manifests/install.yaml](https://raw.githubusercontent.com/argoproj/argo-workflows/v3.0.3/manifests/install.yaml)
-
-1. Check the manifests apply: `kubectl -n argo apply -f https://raw.githubusercontent.com/argoproj/argo-workflows/v3.0.3/manifests/install.yaml`
-
-> v3.1 and after
-
-No action needed.
-
-### 5. Release Notes
-
-In [upgrading](upgrading.md), detail:
-
-* All breaking changes are listed with migration steps
-* The release notes identify every publicly known vulnerability with a CVE assignment
-
-The change log is automatically generated by a Github action.
-
-> Before v3.1
-
-The release title should be the version number (e.g. `v3.0.3`) and nothing else.
-
-Use hack/release-notes.md as the template for your new release notes.
-
-> v3.1 and after
-
-This is done automatically by a Github action.
-
-### 6. Upload Binaries and SHA256 Sums To GitHub
-
-> Before v3.1
-
-After running `make publish-release`, you will have the zipped binaries and SHA256 sums in your local.
-
-Open them with:
-
-```shell
-$ open dist
+```bash
+./hack/what-to-cherry-pick.sh release-3.3
```
-Upload only the zipped binaries (`.gz` suffix) and SHA256 sums (`.sha256` suffix) to GitHub. There should be 12 uploaded files in total.
-
-> v3.1 and after
+Ignore:
-This is done automatically by a Github action.
+* Fixes for features only on master.
+* Dependency upgrades, unless they fix a known security issue.
-### 7. Update Stable Tag
+Cherry-pick the first commit. Run `make test` locally before pushing. If the build times out, the build caches may have
+been lost; try re-running.
-> Before v3.1
+Don't cherry-pick another commit until the CI passes. It is harder to find the cause of a new failed build if the last
+build failed too.
-If this is GA:
-
-Update the `stable` tag
-
-```
-git tag -f stable
-git push -f origin stable
-```
+Cherry-picking commits one-by-one and then waiting for the CI will take a long time. Instead, cherry-pick each commit then
+run `make test` locally before pushing.
-Check the manifests contain the correct tags: [https://raw.githubusercontent.com/argoproj/argo-workflows/stable/manifests/install.yaml](https://raw.githubusercontent.com/argoproj/argo-workflows/stable/manifests/install.yaml)
+## Publish Release
-> v3.1 and after
+✋ Before you start, make sure the branch is passing CI.
-Delete the `stable` tag.
-
-```
-git tag -D stable
-git push origin :stable
-```
-
-### 8. Update Homebrew
-
-If this is GA:
-
-Update the Homebrew formula.
+Push a new tag to the release branch. E.g.:
```bash
-export HOMEBREW_GITHUB_API_TOKEN=$GITHUB_TOKEN
-brew bump-formula-pr argo --version 3.0.3
-```
-
-Check that Homebrew was successfully updated after the PR was merged:
-
- ```
- brew upgrade argo
- /usr/local/bin/argo version
- ```
-
-### 9. Update Java SDK
-
-If this is GA:
-
-Update the Java SDK formula.
-
+git tag v3.3.4
+git push upstream v3.3.4 ;# or origin if you do not use upstream
```
-git clone git@github.com:argoproj-labs/argo-client-java.git
-cd argo-client-java
-make publish VERSION=v3.0.3
-```
-
-Check package published: [https://github.com/argoproj-labs/argo-client-java/packages](https://github.com/argoproj-labs/argo-client-java/packages)
-
-### 10. Publish Release
-
-> Before v3.1
-
-Finally, press publish on the GitHub release. Congrats, you're done!
-
-> v3.1 and after
-This is done automatically by a Github action.
+GitHub Actions will automatically build and publish your release. This takes about 1h. Set yourself a reminder to check
+that this was successful.
diff --git a/docs/resource-duration.md b/docs/resource-duration.md
index 221aa2553ddc..8578526bebf2 100644
--- a/docs/resource-duration.md
+++ b/docs/resource-duration.md
@@ -1,17 +1,15 @@
# Resource Duration
-
-
> v2.7 and after
-Argo Workflows provides an indication of how much resource your workflow has used and saves this
+Argo Workflows provides an indication of how much resource your workflow has used and saves this
information. This is intended to be an **indicative but not accurate** value.
## Calculation
-The calculation is always an estimate, and is calculated by [duration.go](https://github.com/argoproj/argo-workflows/blob/master/util/resource/duration.go)
-based on container duration, specified pod resource requests, limits, or (for memory and CPU)
-defaults.
+The calculation is always an estimate, and is calculated by [`duration.go`](https://github.com/argoproj/argo-workflows/blob/master/util/resource/duration.go)
+based on container duration, specified pod resource requests, limits, or (for memory and CPU)
+defaults.
Each indicator is divided by a common denominator depending on resource type.
@@ -19,32 +17,32 @@ Each indicator is divided by a common denominator depending on resource type.
Each resource type has a denominator used to make large values smaller.
- * CPU: `1`
- * Memory: `1Gi`
- * Storage: `10Gi`
- * Ephemeral Storage: `10Gi`
- * All others: `1`
+* CPU: `1`
+* Memory: `1Gi`
+* Storage: `10Gi`
+* Ephemeral Storage: `10Gi`
+* All others: `1`
-The requested fraction of the base amount will be multiplied by the container's run time to get
-the container's Resource Duration.
+The requested fraction of the base amount will be multiplied by the container's run time to get
+the container's Resource Duration.
-For example, if you've requested `100Mi` of memory (one tenth of the base amount), and the container
-runs 120sec, then the reported Resource Duration will be `12sec * (1Gi memory)`.
+For example, if you've requested `100Mi` of memory (one tenth of the base amount), and the container
+runs 120sec, then the reported Resource Duration will be `12sec * (1Gi memory)`.
### Request Defaults
If `requests` are not set for a container, Kubernetes defaults to `limits`. If `limits` are not set,
-Argo falls back to `100m` for CPU and `100Mi` for memory.
+Argo falls back to `100m` for CPU and `100Mi` for memory.
-**Note:** these are Argo's defaults, _not_ Kubernetes' defaults. For the most meaningful results,
+**Note:** these are Argo's defaults, _not_ Kubernetes' defaults. For the most meaningful results,
set `requests` and/or `limits` for all containers.
### Example
-A pod that runs for 3min, with a CPU limit of `2000m`, no memory request and an `nvidia.com/gpu`
+A pod that runs for 3min, with a CPU limit of `2000m`, no memory request and an `nvidia.com/gpu`
resource limit of `1`:
-```
+```text
CPU: 3min * 2000m / 1000m = 6min * (1 cpu)
Memory: 3min * 100Mi / 1Gi = 18sec * (1Gi memory)
GPU: 3min * 1 / 1 = 3min * (1 nvidia.com/gpu)
@@ -55,10 +53,10 @@ GPU: 3min * 1 / 1 = 2min * (1 nvidia.com/gpu)
Both the web and CLI give abbreviated usage, like `9m10s*cpu,6s*memory,2m31s*nvidia.com/gpu`. In
this context, resources like `memory` refer to the "base amounts".
-For example, `memory` means "amount of time a resource requested 1Gi of memory." If a container only
+For example, `memory` means "amount of time a resource requested 1Gi of memory." If a container only
uses 100Mi, each second it runs will only count as a tenth-second of `memory`.
## Rounding Down
-For short running pods (<10s), the memory value may be 0s. This is because the default is `100Mi`,
-but the denominator is `1Gi`.
+For short running pods (<10s), the memory value may be 0s. This is because the default is `100Mi`,
+but the denominator is `1Gi`.
diff --git a/docs/rest-api.md b/docs/rest-api.md
index 0276dd7faa37..09eab21ee0cc 100644
--- a/docs/rest-api.md
+++ b/docs/rest-api.md
@@ -2,15 +2,13 @@
## Argo Server API
-
-
> v2.5 and after
-Argo Workflows ships with a server that provide more features and security than before.
+Argo Workflows ships with a server that provides more features and security than before.
-The server can be configured with or without client auth (`server --auth-mode client`). When it is disabled, then clients must pass their Kubeconfig base 64 encoded in the HTTP `Authorization` header:
+The server can be configured with or without client auth (`server --auth-mode client`). When it is disabled, then clients must pass their KUBECONFIG base 64 encoded in the HTTP `Authorization` header:
-```
+```bash
ARGO_TOKEN=$(argo auth token)
curl -H "Authorization: $ARGO_TOKEN" https://localhost:2746/api/v1/workflows/argo
```
@@ -18,7 +16,6 @@ curl -H "Authorization: $ARGO_TOKEN" https://localhost:2746/api/v1/workflows/arg
* Learn more on [how to generate an access token](access-token.md).
API reference docs :
-
+
* [Latest docs](swagger.md) (maybe incorrect)
* Interactively in the [Argo Server UI](https://localhost:2746/apidocs). (>= v2.10)
-
diff --git a/docs/rest-examples.md b/docs/rest-examples.md
index 0af3c1dad6b2..01e05376dd87 100644
--- a/docs/rest-examples.md
+++ b/docs/rest-examples.md
@@ -1,25 +1,25 @@
# API Examples
-Document contains couple of examples of workflow JSON's to submit via argo-server REST API.
+This document contains a couple of examples of workflow JSONs to submit via the argo-server REST API.
> v2.5 and after
Assuming
* the namespace of argo-server is argo
-* authentication is turned off (otherwise provide Authentication header)
+* authentication is turned off (otherwise provide Authorization header)
* argo-server is available on localhost:2746
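+
+If authentication is turned on, the same requests work if you add an `Authorization` header. A minimal sketch (assuming `argo auth token` returns a valid token for your user):
+
+```bash
+ARGO_TOKEN=$(argo auth token)
+curl --request GET \
+  --url https://localhost:2746/api/v1/workflows/argo \
+  --header "Authorization: $ARGO_TOKEN"
+```
+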
## Submitting workflow
-```
+```bash
curl --request POST \
--url https://localhost:2746/api/v1/workflows/argo \
--header 'content-type: application/json' \
--data '{
"namespace": "argo",
"serverDryRun": false,
- "workflow": {
+ "workflow": {
"metadata": {
"generateName": "hello-world-",
"namespace": "argo",
@@ -57,21 +57,21 @@ curl --request POST \
## Getting workflows for namespace argo
-```
+```bash
curl --request GET \
--url https://localhost:2746/api/v1/workflows/argo
```
## Getting single workflow for namespace argo
-```
+```bash
curl --request GET \
--url https://localhost:2746/api/v1/workflows/argo/abc-dthgt
```
## Deleting single workflow for namespace argo
-```
+```bash
curl --request DELETE \
--url https://localhost:2746/api/v1/workflows/argo/abc-dthgt
```
diff --git a/docs/resuming-workflow-via-automation.md b/docs/resuming-workflow-via-automation.md
deleted file mode 100644
index f293debebd1c..000000000000
--- a/docs/resuming-workflow-via-automation.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Resume A Workflow
-
-For automation, we want just the name of the workflow, we can use labels to get just this our suspended workflow:
-
-```sh
-WF=$(argo list -l workflows.argoproj.io/workflow-template=wait --running -o name)
-```
-
-```sh
-WF=$(curl $ARGO_SERVER/api/v1/workflows/argo?listOptions.labelSelector=workflows.argoproj.io/workflow-template=wait,\!workflows.argoproj.io/completed \
- -fs \
- -H "Authorization: $ARGO_TOKEN" |
- jq -r '.items[0].metadata.name')
-```
-
-You can resume the workflow via the CLI or API too. If you have more than one node waiting, you must target it using a [node field selector](node-field-selector.md).
-
-````sh
-argo resume $WF --node-field-selector displayName=a
-````
-
-```sh
-curl $ARGO_SERVER/api/v1/workflows/argo/$WF/resume \
- -fs \
- -X 'PUT' \
- -H "Authorization: $ARGO_TOKEN" \
- -d '{"nodeFieldSelector": "displayName=a"}'
-```
-
-Now the workflow will have resumed and completed.
-
-See also:
-
-* [access token](access-token.md)
-* [resuming a workflow via automation](resuming-workflow-via-automation.md)
-* [submitting a workflow via automation](submit-workflow-via-automation.md)
-* [one workflow submitting another](workflow-submitting-workflow.md)
-* [async pattern](async-pattern.md)
diff --git a/docs/retries.md b/docs/retries.md
index f8a8b325e6a7..e0618a68d950 100644
--- a/docs/retries.md
+++ b/docs/retries.md
@@ -2,7 +2,7 @@
Argo Workflows offers a range of options for retrying failed steps.
-## Configuring `retryStrategy` in WorkflowSpec
+## Configuring `retryStrategy` in `WorkflowSpec`
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -26,10 +26,10 @@ spec:
Use `retryPolicy` to choose which failures to retry:
-- Always: Retry all failed steps
-- OnFailure: Retry steps whose main container is marked as failed in Kubernetes
-- OnError: Retry steps that encounter Argo controller errors, or whose init or wait containers fail
-- OnTransientError: Retry steps that encounter errors [defined as transient](https://github.com/argoproj/argo-workflows/blob/master/util/errors/errors.go), or errors matching the TRANSIENT_ERROR_PATTERN [environment variable](https://argoproj.github.io/argo-workflows/environment-variables/). Available in version 3.0 and later.
+- `Always`: Retry all failed steps
+- `OnFailure`: Retry steps whose main container is marked as failed in Kubernetes (this is the default)
+- `OnError`: Retry steps that encounter Argo controller errors, or whose init or wait containers fail
+- `OnTransientError`: Retry steps that encounter errors [defined as transient](https://github.com/argoproj/argo-workflows/blob/master/util/errors/errors.go), or errors matching the `TRANSIENT_ERROR_PATTERN` [environment variable](https://argoproj.github.io/argo-workflows/environment-variables/). Available in version 3.0 and later.
For example:
@@ -68,6 +68,6 @@ If `expression` evaluates to false, the step will not be retried.
See [example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/retry-conditional.yaml) for usage.
-## Backoff
+## Back-Off
You can configure the delay between retries with `backoff`. See [example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/retry-backoff.yaml) for usage.
diff --git a/docs/roadmap.md b/docs/roadmap.md
index 1968ab23d69b..95d511a9661f 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -1,3 +1,3 @@
# Roadmap
-[Open the roadmap ](https://docs.google.com/document/d/1TzhgIPHnlUI9tVqcjoZVmvjuPAIZf5AyygGqL98BBaI/edit?usp=sharing)
\ No newline at end of file
+[Open the roadmap](https://docs.google.com/document/d/1TzhgIPHnlUI9tVqcjoZVmvjuPAIZf5AyygGqL98BBaI/edit?usp=sharing)
diff --git a/docs/running-at-massive-scale.md b/docs/running-at-massive-scale.md
index 6bdf4861d358..bfb6df8278eb 100644
--- a/docs/running-at-massive-scale.md
+++ b/docs/running-at-massive-scale.md
@@ -32,7 +32,5 @@ Where Argo has a lot of work to do, the Kubernetes API can be overwhelmed. There
## Overwhelmed Database
If you're running workflows with many nodes, you'll probably be offloading data to a database. Offloaded data is kept
-for 5m. You can reduce the number of records create by setting `DEFAULT_REQUEUE_TIME=1m`. This will slow reconciliation,
+for 5m. You can reduce the number of records created by setting `DEFAULT_REQUEUE_TIME=1m`. This will slow reconciliation,
but will suit workflows where nodes run for over 1m.
-
-
diff --git a/docs/running-locally.md b/docs/running-locally.md
index 441f5b826957..822c50031cd2 100644
--- a/docs/running-locally.md
+++ b/docs/running-locally.md
@@ -1,18 +1,55 @@
# Running Locally
+You have two options:
+
+1. If you're using VSCode, you can use the [Dev-Container](#development-container). This takes about 7 minutes.
+1. Install the [requirements](#requirements) on your computer manually. This takes about 1 hour.
+
+## Git Clone
+
+Clone the Git repo into: `$(GOPATH)/src/github.com/argoproj/argo-workflows`. Any other path will mean the code
+generation does not work.
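+
+For example (a sketch, assuming `GOPATH` is `$HOME/go`):
+
+```bash
+mkdir -p "$(go env GOPATH)/src/github.com/argoproj"
+git clone https://github.com/argoproj/argo-workflows.git "$(go env GOPATH)/src/github.com/argoproj/argo-workflows"
+```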
+
+## Development Container
+
+A development container is a running Docker container with a well-defined tool/runtime stack and its prerequisites.
+[The Visual Studio Code Remote - Containers](https://code.visualstudio.com/docs/remote/containers) extension lets you use a Docker container as a full-featured development environment.
+
+System requirements can be found [here](https://code.visualstudio.com/docs/remote/containers#_system-requirements)
+
+Note:
+
+* `GOPATH` must be `$HOME/go`.
+* for **Apple Silicon**
+    * This platform can take about three times the indicated time
+ * Configure Docker Desktop to use BuildKit:
+
+ ```json
+ "features": {
+ "buildkit": true
+ },
+ ```
+
+* For **Windows WSL2**
+ * Configure [`.wslconfig`](https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configuration-setting-for-wslconfig) to limit memory usage by the WSL2 to prevent VSCode OOM.
+
+* For **Linux**
+ * Use [Docker Desktop](https://docs.docker.com/desktop/linux/install/) instead of [Docker Engine](https://docs.docker.com/engine/install/) to prevent incorrect network configuration by k3d
+
## Requirements
-* [Go 1.17](https://golang.org/dl/)
+* [Go 1.18](https://golang.org/dl/)
* [Yarn](https://classic.yarnpkg.com/en/docs/install/#mac-stable)
* [Docker](https://docs.docker.com/get-docker/)
-* [protoc](http://google.github.io/proto-lens/installing-protoc.html)
-* [jq](https://stedolan.github.io/jq/download/)
-* A local Kubernetes cluster (`k3d`, `kind`, or `minikube`)
+* [`protoc`](http://google.github.io/proto-lens/installing-protoc.html)
+* [`jq`](https://stedolan.github.io/jq/download/)
+* [`node` >= 16](https://nodejs.org/download/release/latest-v16.x/) for running the UI
+* A local Kubernetes cluster ([`k3d`](https://k3d.io/), [`kind`](https://kind.sigs.k8s.io/docs/user/quick-start/#installation), or [`minikube`](https://minikube.sigs.k8s.io/docs/start/))
We recommend using [K3D](https://k3d.io/) to set up the local Kubernetes cluster since this will allow you to test RBAC
set-up and is fast. You can set-up K3D to be part of your default kube config as follows:
-```shell
+```bash
k3d cluster start --wait
```
@@ -20,105 +57,187 @@ Alternatively, you can use [Minikube](https://github.com/kubernetes/minikube) to
Once a local Kubernetes cluster has started via `minikube start`, your kube config will use Minikube's context
automatically.
-## Developing locally
+⚠️ Do not use Docker for Desktop with its embedded Kubernetes: it does not support Kubernetes RBAC (i.e. `kubectl auth can-i` always
+returns `allowed`).
-!!! Warning
- The git repo must be checked out into: `$(GOPATH)/src/github.com/argoproj/argo-workflows`
+## Developing locally
Add the following to your `/etc/hosts`:
-```
+```text
127.0.0.1 dex
127.0.0.1 minio
127.0.0.1 postgres
127.0.0.1 mysql
+127.0.0.1 azurite
```
-To run the controller and argo-server API locally, with MinIO inside the "argo" namespace of your cluster:
+To start:
+
+* The controller, so you can run workflows.
+* MinIO (<http://localhost:9000>, use admin/password), so you can use artifacts.
-```shell
+Run:
+
+```bash
+make start
+```
+
+Make sure you don't see any errors in your terminal. This runs the Workflow Controller locally on your machine (not in Docker/Kubernetes).
+
+You can submit a workflow for testing using `kubectl`:
+
+```bash
+kubectl create -f examples/hello-world.yaml
+```
+
+We recommend running `make clean` before `make start` to ensure recompilation.
+
+If you made changes to the executor, you need to build the image:
+
+```bash
+make argoexec-image
+```
+
+To also start the API on <https://localhost:2746>:
+
+```bash
make start API=true
```
-
-To start the UI, use `UI=true`:
-```shell
-make start API=true UI=true
+This runs the Argo Server (in addition to the Workflow Controller) locally on your machine.
+
+To also start the UI on <http://localhost:8080> (`UI=true` implies `API=true`):
+
+```bash
+make start UI=true
```
-To test the workflow archive, use `PROFILE=mysql`:
+
+If you are making changes to the CLI (i.e. Argo Server), you can build it separately if you want:
-```shell
-make start API=true UI=true PROFILE=mysql
+```bash
+make cli
+./dist/argo submit examples/hello-world.yaml ;# new CLI is created as `./dist/argo`
```
-
-To test SSO integration, use `PROFILE=sso`:
-```shell
-make start API=true UI=true PROFILE=sso
+Note that this will also be built automatically if you run `make start API=true`.
+
+To test the workflow archive, use `PROFILE=mysql` or `PROFILE=postgres`:
+
+```bash
+make start PROFILE=mysql
```
-You’ll now have:
+You'll then have either:
-* Argo UI on http://localhost:8080
-* Argo Server API on https://localhost:2746
-* MinIO on http://localhost:9000 (use admin/password)
-* Postgres on http://localhost:5432, run `make postgres-cli` to access.
-* MySQL on http://localhost:3306, run `make mysql-cli` to access.
+* Postgres on <http://localhost:5432>, run `make postgres-cli` to access.
+* MySQL on <http://localhost:3306>, run `make mysql-cli` to access.
-Before submitting/running workflows, build the executor images with this command:
+To test SSO integration, use `PROFILE=sso`:
-```shell
-make argoexec-image
+```bash
+make start UI=true PROFILE=sso
```
### Running E2E tests locally
-1. Configure your IDE to set the `KUBECONFIG` environment variable to your k3d kubeconfig file
-2. Find an e2e test that you want to run in `test/e2e`
-3. Determine which profile the e2e test is using by inspecting the go build flag at the top of the file and referring to [ci-build.yaml](https://github.com/argoproj/argo-workflows/blob/master/.github/workflows/ci-build.yaml)
+Start up Argo Workflows using the following:
+
+```bash
+make start PROFILE=mysql AUTH_MODE=client STATIC_FILES=false API=true
+```
- For example `TestArchiveStrategies` in `test/e2e/functional_test.go` has the following build flags
+If you want to run Azure tests against a local Azurite, add `AZURE=true`:
- ```go
- //go:build functional
- // +build functional
- ```
+```bash
+make start PROFILE=mysql AUTH_MODE=client STATIC_FILES=false API=true AZURE=true
+```
- In [ci-build.yaml](https://github.com/argoproj/argo-workflows/blob/master/.github/workflows/ci-build.yaml) the functional test suite is using the `minimal` profile
+#### Running One Test
-4. Run the profile in a terminal window
+In most cases, you want to run the test that relates to your changes locally. You should not run all the test suites.
+Our CI will run those concurrently when you create a PR, which will give you feedback much faster.
- ```shell
- make start PROFILE=minimal E2E_EXECUTOR=emissary AUTH_MODE=client STATIC_FILES=false LOG_LEVEL=info API=true UI=false
- ```
+Find the test that you want to run in `test/e2e`, then run it by name:
+
+```bash
+make TestArtifactServer
+```
+
+If you wish to include tests against Azure Storage, define `AZURE=true`:
+
+```bash
+make AZURE=true TestArtifactServer
+```
+
+#### Running A Set Of Tests
-5. Run the test in your IDE
+You can find the build tag at the top of the test file.
+
+```go
+//go:build api
+```
+
+You need to run `make test-{buildTag}`, so for `api` that would be:
+
+```bash
+make test-api
+```
+
+#### Diagnosing Test Failure
+
+Tests often fail: that's good. To diagnose failure:
+
+* Run `kubectl get pods`, are pods in the state you expect?
+* Run `kubectl get wf`, is your workflow in the state you expect?
+* What do the pod logs say? I.e. `kubectl logs`.
+* Check the controller and argo-server logs. These are printed to the console you ran `make start` in. Is anything
+ logged at `level=error`?
+
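+As a convenience, here are the checks above as shell commands (a sketch; `<pod-name>` is a placeholder for the pod you are interested in):
+
+```bash
+kubectl get pods                 # are the pods in the state you expect?
+kubectl get wf                   # is your workflow in the state you expect?
+kubectl logs <pod-name> -c main  # what do the pod logs say?
+```
+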
+If tests run slowly or time out, factory reset your Kubernetes cluster.
## Committing
Before you commit code and raise a PR, always run:
-```shell
+```bash
make pre-commit -B
```
-Please adhere to the following when creating your commits:
+Please do the following when creating your PR:
* Sign-off your commits.
* Use [Conventional Commit messages](https://www.conventionalcommits.org/en/v1.0.0/).
* Suffix the issue number.
-Example:
+Examples:
-```shell
+```bash
git commit --signoff -m 'fix: Fixed broken thing. Fixes #1234'
```
-Troubleshooting:
+```bash
+git commit --signoff -m 'feat: Added a new feature. Fixes #1234'
+```
+
+## Troubleshooting
* When running `make pre-commit -B`, if you encounter errors like
- `make: *** [pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json] Error 1`,
- ensure that you have checked out your code into `$(GOPATH)/src/github.com/argoproj/argo-workflows`.
+ `make: *** [pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json] Error 1`, ensure that you
+ have checked out your code into `$(GOPATH)/src/github.com/argoproj/argo-workflows`.
* If you encounter "out of heap" issues when building UI through Docker, please validate resources allocated to Docker.
Compilation may fail if allocated RAM is less than 4Gi.
+
+## Using Multiple Terminals
+
+I run the controller in one terminal, and the UI in another. I like the UI: it is much faster to debug workflows than
+the terminal. This allows you to make changes to the controller and re-start it, without restarting the UI (which I
+think takes too long to start-up).
+
+As a convenience, `CTRL=false` implies `UI=true`, so just run:
+
+```bash
+make start CTRL=false
+```
diff --git a/docs/scaling.md b/docs/scaling.md
index 2c026bda66b4..c81b1382504a 100644
--- a/docs/scaling.md
+++ b/docs/scaling.md
@@ -10,7 +10,7 @@ You cannot horizontally scale the controller.
You can scale the controller vertically:
-- If you have many workflows, increase `--workflow-workers` and `--workflow-ttl-workers`.
+- If you have many workflows, increase `--workflow-workers` and `--workflow-ttl-workers`.
- Increase both `--qps` and `--burst`.
You will need to increase the controller's memory and CPU.
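+
+For example, a sketch of appending these flags to the controller's arguments with a JSON patch (the values and the `argo` namespace are illustrative, and the patch assumes the container already has an `args` list):
+
+```bash
+kubectl -n argo patch deployment workflow-controller --type=json -p='[
+  {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--workflow-workers=64"},
+  {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--qps=50"},
+  {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--burst=90"}
+]'
+```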
@@ -23,13 +23,13 @@ Rather than running a single installation in your cluster, run one per namespace
### Instance ID
-Within a cluster can use instance ID to run N Argo instances within a cluster.
+You can use an instance ID to run N Argo instances within a single cluster.
Create one namespace for each Argo, e.g. `argo-i1`, `argo-i2`:.
Edit [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml) for each namespace to set an instance ID.
-```
+```yaml
apiVersion: v1
kind: ConfigMap
metadata:
@@ -42,7 +42,7 @@ data:
You may need to pass the instance ID to the CLI:
-```
+```bash
argo --instanceid i1 submit my-wf.yaml
```
diff --git a/docs/security.md b/docs/security.md
index 9a9571ace27b..a0b691065fb3 100644
--- a/docs/security.md
+++ b/docs/security.md
@@ -1,6 +1,8 @@
# Security
-See [SECURITY.md](https://github.com/argoproj/argo-workflows/blob/master/SECURITY.md).
+To report security issues, see [SECURITY.md](https://github.com/argoproj/argo-workflows/blob/master/SECURITY.md).
+
+💡 Read [Practical Argo Workflows Hardening](https://blog.argoproj.io/practical-argo-workflows-hardening-dd8429acc1ce).
## Workflow Controller Security
@@ -14,7 +16,7 @@ The controller has permission (via Kubernetes RBAC + its config map) with either
* Create/get/delete pods, PVCs, and PDBs.
* List/get template, config maps, service accounts, and secrets.
-See [workflow-controller-clusterrole.yaml](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml) or [workflow-controller-role.yaml](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/namespace-install/workflow-controller-rbac/workflow-controller-role.yaml)
+See [workflow controller cluster-role](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/cluster-install/workflow-controller-rbac/workflow-controller-clusterrole.yaml) or [workflow-controller-role.yaml](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/namespace-install/workflow-controller-rbac/workflow-controller-role.yaml)
### User Permissions
@@ -22,12 +24,12 @@ Users minimally need permission to create/read workflows. The controller will th
A way to think of this is that, if the user has permission to create a workflow in a namespace, then it is OK to create pods or anything else for them in that namespace.
-If the user only has permission to create workflows, then they will be typically unable to configure other necessary resources such as config maps, or view the outcome of their workflow. This is useful when the user is a service.
+If the user only has permission to create workflows, then they will typically be unable to configure other necessary resources such as config maps, or view the outcome of their workflow. This is useful when the user is a service.
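+
+For illustration, a sketch of granting a service account only workflow create/read access (the `workflow-submitter` role name, the `argo` namespace, and the `my-service` service account are placeholders):
+
+```bash
+kubectl -n argo create role workflow-submitter \
+  --verb=create,get,list,watch \
+  --resource=workflows.argoproj.io
+kubectl -n argo create rolebinding workflow-submitter \
+  --role=workflow-submitter \
+  --serviceaccount=argo:my-service
+```
+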
!!! Warning
If you allow users to create workflows in the controller's namespace (typically `argo`), it may be possible for users to modify the controller itself. In a namespace-install the managed namespace should therefore not be the controller's namespace.
-You can typically further restrict what a user can do to just being able to submit workflows from templates using [the workflow requriments feature](workflow-restrictions.md).
+You can typically further restrict a user to only being able to submit workflows from templates using [the workflow requirements feature](workflow-restrictions.md).
### Workflow Pod Permissions
@@ -42,11 +44,11 @@ This service account typically needs [permissions](workflow-rbac.md).
Different service accounts should be used if a workflow pod needs to have elevated permissions, e.g. to create other resources.
-The main container will have the service account token mounted , allowing the main container to patch pods (amongst other permissions). Set `automountServiceAccountToken` to false to prevent this. See [fields](fields.md).
+The main container will have the service account token mounted, allowing the main container to patch pods (among other permissions). Set `automountServiceAccountToken` to false to prevent this. See [fields](fields.md).
By default, workflows pods run as `root`. To further secure workflow pods, set the [workflow pod security context](workflow-pod-security-context.md).
-You should configure the controller with the correct [workflow executor](workflow-executors.md) for your trade off between security and scalabily.
+You should configure the controller with the correct [workflow executor](workflow-executors.md) for your trade-off between security and scalability.
These settings can be set by default using [workflow defaults](default-workflow-specs.md).
@@ -66,17 +68,17 @@ You can achieve this by configuring the `argo-server` role ([example](https://gi
## Network Security
-Argo Workflows requires various levels of network access depending on configuration and the features enabled. The following describes the different workflow components and their network access needs, to help provide guidance on how to configure the argo namespace in a secure manner (e.g. NetworkPolicies).
+Argo Workflows requires various levels of network access depending on configuration and the features enabled. The following describes the different workflow components and their network access needs, to help provide guidance on how to configure the argo namespace in a secure manner (e.g. `NetworkPolicy`).
### Argo Server
-The argo server is commonly exposed to end-users to provide users with a user interface for visualizing and managing their workflows. It must also be exposed if leveraging [webhooks](webhooks.md) to trigger workflows. Both of these use cases require that the argo-server Service to be exposed for ingress traffic (e.g. with an Ingress object or load balancer). Note that the Argo UI is also available to be accessed by running the server locally (i.e. `argo server`) using local kubeconfig credentials, and visiting the UI over https://localhost:2746.
+The Argo Server is commonly exposed to end-users to provide users with a UI for visualizing and managing their workflows. It must also be exposed if leveraging [webhooks](webhooks.md) to trigger workflows. Both of these use cases require that the argo-server Service be exposed for ingress traffic (e.g. with an Ingress object or load balancer). Note that the Argo UI is also available to be accessed by running the server locally (i.e. `argo server`) using local KUBECONFIG credentials, and visiting the UI over <https://localhost:2746>.
-The argo server additionally has a feature to allow downloading of artifacts through the user interface. This feature requires that the argo-server be given egress access to the underlying artifact provider (e.g. S3, GCS, MinIO, Artifactory) in order to download and stream the artifact.
+The Argo Server additionally has a feature to allow downloading of artifacts through the UI. This feature requires that the argo-server be given egress access to the underlying artifact provider (e.g. S3, GCS, MinIO, Artifactory, Azure Blob Storage) in order to download and stream the artifact.
### Workflow Controller
-The workflow-controller Deployment exposes a Prometheus metrics endpoint (workflow-controller-metrics:9090) so that a Prometheus server can periodically scrape for controller level metrics. Since prometheus is typically running in a separate namespace, the argo namespace should be configured to allow cross-namespace ingress access to the workflow-controller-metrics Service.
+The workflow-controller Deployment exposes a Prometheus metrics endpoint (workflow-controller-metrics:9090) so that a Prometheus server can periodically scrape for controller level metrics. Since Prometheus is typically running in a separate namespace, the argo namespace should be configured to allow cross-namespace ingress access to the workflow-controller-metrics Service.
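+
+To check the endpoint from your own machine, a quick sketch (assuming the controller runs in the `argo` namespace):
+
+```bash
+kubectl -n argo port-forward svc/workflow-controller-metrics 9090:9090 &
+sleep 2
+curl -s http://localhost:9090/metrics | head
+```
+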
### Database access
diff --git a/docs/service-accounts.md b/docs/service-accounts.md
index aa05ca540a7a..4b44c1b55c40 100644
--- a/docs/service-accounts.md
+++ b/docs/service-accounts.md
@@ -2,7 +2,7 @@
## Configure the service account to run Workflows
-### Roles, RoleBindings, and ServiceAccounts
+### Roles, Role-Bindings, and Service Accounts
In order for Argo to support features such as artifacts, outputs, access to secrets, etc. it needs to communicate with Kubernetes resources
using the Kubernetes API. To communicate with the Kubernetes API, Argo uses a `ServiceAccount` to authenticate itself to the Kubernetes API.
@@ -10,7 +10,7 @@ You can specify which `Role` (i.e. which permissions) the `ServiceAccount` that
Then, when submitting Workflows you can specify which `ServiceAccount` Argo uses using:
-```sh
+```bash
argo submit --serviceaccount <name>
```
@@ -22,10 +22,9 @@ For more information about granting Argo the necessary permissions for your use
For the purposes of this demo, we will grant the `default` `ServiceAccount` admin privileges (i.e., we will bind the `admin` `Role` to the `default` `ServiceAccount` of the current namespace):
-```sh
+```bash
kubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=argo:default -n argo
```
**Note that this will grant admin privileges to the `default` `ServiceAccount` in the namespace that the command is run from, so you will only be able to
run Workflows in the namespace where the `RoleBinding` was made.**
-
diff --git a/docs/sidecar-injection.md b/docs/sidecar-injection.md
index 9180734fe945..470c48f6d7db 100644
--- a/docs/sidecar-injection.md
+++ b/docs/sidecar-injection.md
@@ -11,8 +11,7 @@ You will minimize problems by not using Istio with Argo Workflows.
See [#1282](https://github.com/argoproj/argo-workflows/issues/1282).
-
-### Support Matrix
+## Support Matrix
Key:
@@ -20,13 +19,13 @@ Key:
* Any - we can kill any image
* KubectlExec - we kill images by running `kubectl exec`
-| Executor | Sidecar | Injected Sidecar |
-|---|---|---|
-| `docker` | Any | Unsupported |
-| `emissary` | Any | KubectlExec |
-| `k8sapi` | Shell | KubectlExec |
-| `kubelet` | Shell | KubectlExec |
-| `pns` | Any | Any |
+| Executor | Sidecar | Injected Sidecar |
+|---|---|---|
+| `docker` | Any | Unsupported |
+| `emissary` | Any | KubectlExec |
+| `k8sapi` | Shell | KubectlExec |
+| `kubelet` | Shell | KubectlExec |
+| `pns` | Any | Any |
## How We Kill Sidecars Using `kubectl exec`
@@ -35,7 +34,7 @@ Key:
Kubernetes does not provide a way to kill a single container. You can delete a pod, but this kills all containers, and loses all information
and logs of that pod.
-Instead, try to mimic the Kubernetes termination behaviour, which is:
+Instead, try to mimic the Kubernetes termination behavior, which is:
1. SIGTERM PID 1
1. Wait for the pod's `terminateGracePeriodSeconds` (30s by default).
diff --git a/docs/static-code-analysis.md b/docs/static-code-analysis.md
index 45593ccf205f..9c3c8b5cb800 100644
--- a/docs/static-code-analysis.md
+++ b/docs/static-code-analysis.md
@@ -2,7 +2,7 @@
We use the following static code analysis tools:
-* golangci-lint and tslint for compile time linting
-* [snyk.io](https://app.snyk.io/org/argoproj/projects) - for image scanning
+* `golangci-lint` and `tslint` for compile time linting.
+* [Snyk](https://app.snyk.io/org/argoproj/projects) for image scanning.
These are at least run daily or on each pull request.
diff --git a/docs/stress-testing.md b/docs/stress-testing.md
index ae98b75f5dc6..65706296990a 100644
--- a/docs/stress-testing.md
+++ b/docs/stress-testing.md
@@ -27,12 +27,12 @@ argo submit examples/hello-world.yaml --watch
Checks
-* Open http://localhost:2746/workflows and check it loads and that you can run a workflow.
-* Open http://localhost:9090/metrics and check you can see the Prometheus metrics.
-* Open http://localhost:9091/graph and check you can see a Prometheus graph. You can
+* Open <http://localhost:2746/workflows> and check it loads and that you can run a workflow.
+* Open <http://localhost:9090/metrics> and check you can see the Prometheus metrics.
+* Open <http://localhost:9091/graph> and check you can see a Prometheus graph. You can
use [this Tab Auto Refresh Chrome extension](https://chrome.google.com/webstore/detail/tab-auto-refresh/oomoeacogjkolheacgdkkkhbjipaomkn)
to auto-refresh the page.
-* Open http://localhost:6060/debug/pprof and check you can access pprof.
+* Open <http://localhost:6060/debug/pprof> and check you can access `pprof`.
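+
+The same checks from the command line (a sketch; ports as above, adjust the scheme if your server runs with TLS):
+
+```bash
+curl -ksL -o /dev/null -w '%{http_code}\n' http://localhost:2746/workflows  # UI loads?
+curl -s http://localhost:9090/metrics | head                                # controller metrics
+curl -s http://localhost:6060/debug/pprof/ | head                           # pprof index
+```
+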
Run `go run ./test/stress/tool -n 10000` to run a large number of workflows.
@@ -55,4 +55,4 @@ go tool pprof -png http://localhost:6060/debug/pprof/profile
```bash
gcloud container clusters delete argo-workflows-stress-1
-```
\ No newline at end of file
+```
diff --git a/docs/submit-workflow-via-automation.md b/docs/submit-workflow-via-automation.md
deleted file mode 100644
index cb0a2773cda1..000000000000
--- a/docs/submit-workflow-via-automation.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Submitting A Workflow Via Automation
-
-
-
-> v2.8 and after
-
-You may want to consider using [events](events.md) or [webhooks](webhooks.md) instead.
-
-Firstly, to do any automation, you'll need an ([access token](access-token.md)). For this example, our role needs extra permissions:
-
-```sh
-kubectl patch role jenkins -p '{"rules": [{"apiGroups": ["argoproj.io"], "resources": ["workflowtemplates"], "verbs": ["get"]}, {"apiGroups": ["argoproj.io"], "resources": ["workflows"], "verbs": ["create", "list", "get", "update"]}]}'
-```
-
-Next, create a workflow template
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: WorkflowTemplate
-metadata:
- name: hello-argo
-spec:
- entrypoint: main
- templates:
- - name: main
- steps:
- - - name: a
- template: whalesay
- - name: whalesay
- container:
- image: docker/whalesay:latest
-```
-
-You can submit this workflow via an CLI or the [Argo Server API](rest-api.md).
-
-Submit via CLI (note how I add a label to help identify it later on):
-
-````sh
-argo submit --from wftmpl/hello-argo -l workflows.argoproj.io/workflow-template=hello-argo
-````
-
-Or submit via API:
-
-```sh
-curl $ARGO_SERVER/api/v1/workflows/argo/submit \
- -fs \
- -H "Authorization: $ARGO_TOKEN" \
- -d '{"resourceKind": "WorkflowTemplate", "resourceName": "hello-argo", "submitOptions": {"labels": "workflows.argoproj.io/workflow-template=hello-argo"}}'
-```
-
-You'll see that the workflow has been created:
-
-```sh
-argo list
-NAME STATUS AGE DURATION PRIORITY
-hello-argo-77m4l Running 33s 33s 0
-```
-
-See also:
-
-See also:
-
-* [access token](access-token.md)
-* [events](events.md)
-* [webhooks](webhooks.md)
-* [resuming a workflow via automation](resuming-workflow-via-automation.md)
-* [one workflow submitting another](workflow-submitting-workflow.md)
-* [async pattern](async-pattern.md)
diff --git a/docs/survey-data-privacy.md b/docs/survey-data-privacy.md
index 5d6345299ed4..fbeeb3f140ec 100644
--- a/docs/survey-data-privacy.md
+++ b/docs/survey-data-privacy.md
@@ -1,3 +1,3 @@
# Survey Data Privacy
-[Privacy policy](https://www.linuxfoundation.org/privacy-policy/)
\ No newline at end of file
+[Privacy policy](https://www.linuxfoundation.org/privacy-policy/)
diff --git a/docs/swagger.md b/docs/swagger.md
index 143f6cdb00b1..c6a8cbfd5f47 100644
--- a/docs/swagger.md
+++ b/docs/swagger.md
@@ -1,3 +1,3 @@
-# Argo Server API
+# API Reference
[Open the Swagger API docs](https://raw.githubusercontent.com/argoproj/argo-workflows/master/api/openapi-spec/swagger.json).
diff --git a/docs/synchronization.md b/docs/synchronization.md
index f3e47ed7ad36..3eedd463a767 100644
--- a/docs/synchronization.md
+++ b/docs/synchronization.md
@@ -1,19 +1,19 @@
# Synchronization
-
-
> v2.10 and after
## Introduction
-Synchronization enables users to limit the parallel execution of certain workflows or
+
+Synchronization enables users to limit the parallel execution of certain workflows or
templates within a workflow without having to restrict others.
-Users can create multiple synchronization configurations in the `ConfigMap` that can be referred to
+Users can create multiple synchronization configurations in the `ConfigMap` that can be referred to
from a workflow or template within a workflow. Alternatively, users can
configure a mutex to prevent concurrent execution of templates or
workflows using the same mutex.
For example:
+
```yaml
apiVersion: v1
kind: ConfigMap
@@ -21,13 +21,14 @@ metadata:
name: my-config
data:
workflow: "1" # Only one workflow can run at given time in particular namespace
- template: "2" # Two instance of template can run at a given time in particular namespace
+  template: "2" # Two instances of a template can run at a given time in a particular namespace
```
### Workflow-level Synchronization
-Workflow-level synchronization limits parallel execution of the workflow if workflow have same synchronization reference.
-In this example, Workflow refers `workflow` synchronization key which is configured as rate limit 1,
-so only one workflow instance will be executed at given time even multiple workflows created.
+
+Workflow-level synchronization limits parallel execution of workflows if they have the same synchronization reference.
+In this example, the Workflow refers to the `workflow` synchronization key, which is configured as a limit of 1,
+so only one workflow instance will be executed at a given time even if multiple workflows are created.
Using a semaphore configured by a `ConfigMap`:
@@ -72,9 +73,10 @@ spec:
```
### Template-level Synchronization
-Template-level synchronization limits parallel execution of the template across workflows, if template have same synchronization reference.
-In this example, `acquire-lock` template has synchronization reference of `template` key which is configured as rate limit 2,
-so, two instance of templates will be executed at given time even multiple step/task with in workflow or different workflow refers same template.
+
+Template-level synchronization limits parallel execution of the template across workflows if the templates have the same synchronization reference.
+In this example, the `acquire-lock` template has a synchronization reference to the `template` key, which is configured as a limit of 2,
+so at most two instances of the template will be executed at a given time, even if multiple steps/tasks within a workflow or different workflows refer to the same template.
Using a semaphore configured by a `ConfigMap`:
@@ -139,14 +141,14 @@ spec:
```
Examples:
+
1. [Workflow level semaphore](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-wf-level.yaml)
1. [Workflow level mutex](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-mutex-wf-level.yaml)
1. [Step level semaphore](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-tmpl-level.yaml)
1. [Step level mutex](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-mutex-tmpl-level.yaml)
-### Other Parallelism support:
-In addition to this synchronization, the workflow controller supports a parallelism setting that applies to all workflows
-in the system (it is not granular to a class of workflows, or tasks withing them). Furthermore, there is a parallelism setting
-at the workflow and template level, but this only restricts total concurrent executions of tasks within the same workflow.
-
+### Other Parallelism support
+In addition to this synchronization, the workflow controller supports a parallelism setting that applies to all workflows
+in the system (it is not granular to a class of workflows, or tasks within them). Furthermore, there is a parallelism setting
+at the workflow and template level, but this only restricts total concurrent executions of tasks within the same workflow.
diff --git a/docs/template-defaults.md b/docs/template-defaults.md
index 1ead1c37525e..16d4fe3f6f50 100644
--- a/docs/template-defaults.md
+++ b/docs/template-defaults.md
@@ -1,13 +1,15 @@
# Template Defaults
+
> v3.1 and after
## Introduction
`TemplateDefaults` feature enables the user to configure the default template values in workflow spec level that will apply to all the templates in the workflow. If the template has a value that also has a default value in `templateDefault`, the Template's value will take precedence. These values will be applied during the runtime. Template values and default values are merged using Kubernetes strategic merge patch. To check whether and how list values are merged, inspect the `patchStrategy` and `patchMergeKey` tags in the [workflow definition](https://github.com/argoproj/argo-workflows/blob/master/pkg/apis/workflow/v1alpha1/workflow_types.go).
-## Configuring `templateDefaults` in WorkflowSpec
+## Configuring `templateDefaults` in `WorkflowSpec`
For example:
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -24,10 +26,12 @@ spec:
container:
image: docker/whalesay:latest
```
+
[template defaults example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/template-defaults.yaml)
## Configuring `templateDefaults` in Controller Level
-Operator can configure the `templateDefaults` in [workflowDefaults](default-workflow-specs.md). This `templateDefault` will be applied to all the workflow which runs on the controller.
+
+Operators can configure `templateDefaults` in [workflow defaults](default-workflow-specs.md). This `templateDefault` will be applied to all workflows that run on the controller.
The following would be specified in the Config Map:
diff --git a/docs/tls.md b/docs/tls.md
index 283417aa51c8..1b5ea436acf5 100644
--- a/docs/tls.md
+++ b/docs/tls.md
@@ -1,13 +1,11 @@
# Transport Layer Security
-
-
> v2.8 and after
If you're running Argo Server you have three options with increasing transport security (note - you should also be
running [authentication](argo-server.md#auth-mode)):
-### Default configuration:
+## Default configuration
> v2.8 - 2.12
@@ -17,32 +15,32 @@ Defaults to [Plain Text](#plain-text)
Defaults to [Encrypted](#encrypted) if cert is available
-Argo image/deployment defaults to [Encrypted](#encrypted) with a self-signed certificate expires after 365 days.
+Argo image/deployment defaults to [Encrypted](#encrypted) with a self-signed certificate which expires after 365 days.
## Plain Text
-*Recommended for: dev*
+Recommended for: development.
Everything is sent in plain text.
-Start Argo Server with the --secure=false (or ARGO_SECURE=false) flag, e.g.:
+Start Argo Server with the `--secure=false` (or `ARGO_SECURE=false`) flag, e.g.:
-```
+```bash
export ARGO_SECURE=false
-argo --secure=false
+argo server --secure=false
```
To secure the UI you may front it with a HTTPS proxy.
## Encrypted
-*Recommended for: development and test environments*
+Recommended for: development and test environments.
You can encrypt connections without any real effort.
Start Argo Server with the `--secure` flag, e.g.:
-```
+```bash
argo server --secure
```
@@ -50,11 +48,11 @@ It will start with a self-signed certificate that expires after 365 days.
Run the CLI with `--secure` (or `ARGO_SECURE=true`) and `--insecure-skip-verify` (or `ARGO_INSECURE_SKIP_VERIFY=true`).
-```
+```bash
argo --secure --insecure-skip-verify list
```
-```
+```bash
export ARGO_SECURE=true
export ARGO_INSECURE_SKIP_VERIFY=true
argo --secure --insecure-skip-verify list
@@ -63,7 +61,7 @@ argo --secure --insecure-skip-verify list
Tip: Don't forget to update your readiness probe to use HTTPS. To do so, edit your `argo-server`
Deployment's `readinessProbe` spec:
-```
+```yaml
readinessProbe:
httpGet:
scheme: HTTPS
@@ -71,14 +69,14 @@ readinessProbe:
### Encrypted and Verified
-*Recommended for: production environments*
+Recommended for: production environments.
-Run your HTTPS proxy in front of the Argo Server. You'll need to set-up your certificates and this out of scope of this
-documentation.
+Run your HTTPS proxy in front of the Argo Server. You'll need to set up your certificates (this is outside the scope of this
+documentation).
Start Argo Server with the `--secure` flag, e.g.:
-```
+```bash
argo server --secure
```
@@ -86,11 +84,11 @@ As before, it will start with a self-signed certificate that expires after 365 d
Run the CLI with `--secure` (or `ARGO_SECURE=true`) only.
-```
+```bash
argo --secure list
```
-```
+```bash
export ARGO_SECURE=true
argo list
```
@@ -107,4 +105,3 @@ This must be one of these [int values](https://golang.org/pkg/crypto/tls/).
| v1.1 | 770 |
| v1.2 | 771 |
| v1.3 | 772 |
-
diff --git a/docs/tolerating-pod-deletion.md b/docs/tolerating-pod-deletion.md
index 628fdb2ad7a9..8024adec7883 100644
--- a/docs/tolerating-pod-deletion.md
+++ b/docs/tolerating-pod-deletion.md
@@ -5,8 +5,8 @@
In Kubernetes, pods are cattle and can be deleted at any time. Deletion could be manually via `kubectl delete pod`, during a node drain, or for other reasons.
This can be very inconvenient, your workflow will error, but for reasons outside of your control.
-
-A [pod disruption budget](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/default-pdb-support.yaml) can reduce the likelihood of this happening. But, it cannot entirely prevent it.
+
+A [pod disruption budget](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/default-pdb-support.yaml) can reduce the likelihood of this happening, but it cannot entirely prevent it.
To retry pods that were deleted, set `retryStrategy.retryPolicy: OnError`.
@@ -35,4 +35,6 @@ spec:
- 30s
```
-Then execute `kubectl delete pod example`. You'll see that the errored node is automatically retried.
\ No newline at end of file
+Then execute `kubectl delete pod example`. You'll see that the errored node is automatically retried.
+
+💡 Read more on [architecting workflows for reliability](https://blog.argoproj.io/architecting-workflows-for-reliability-d33bd720c6cc).
diff --git a/docs/training.md b/docs/training.md
index 170277b13886..efe7e56c5405 100644
--- a/docs/training.md
+++ b/docs/training.md
@@ -1,18 +1,11 @@
# Training
-## Hands-On
-
-We've created a Katacoda course featuring beginner and intermediate lessons. These allow to you try out Argo Workflows
-in your web browser without needing to install anything on your computer. Each lesson starts up a Kubernetes cluster
-that you can access via a web browser.
-
-The course will take around 2 hours to complete and is the fastest way to learn Argo Workflows.
-
-[ Open the course](https://www.katacoda.com/argoproj/courses/argo-workflows/)
-
## Videos
-We also have a playlist of videos that dive into various topics. This includes contributing to Argo Workflows, not
-covered in the hand-on.
+We have a YouTube playlist of videos that includes workshops you can follow along with:
[ Open the playlist](https://youtube.com/playlist?list=PLGHfqDpnXFXLHfeapfvtt9URtUF1geuBo)
+
+## Additional resources
+
+Visit the [awesome-argo GitHub repo](https://github.com/terrytangyuan/awesome-argo) for more educational resources.
diff --git a/docs/upgrading.md b/docs/upgrading.md
index 76ed9f7a02a5..a46988d51f67 100644
--- a/docs/upgrading.md
+++ b/docs/upgrading.md
@@ -1,14 +1,79 @@
-
-# Upgrading
+# Upgrading Guide
Breaking changes typically (sometimes we don't realise they are breaking) have "!" in the commit message, as per
the [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary).
+## Upgrading to v3.4
+
+### Non-Emissary executors are removed. ([#7829](https://github.com/argoproj/argo-workflows/issues/7829))
+
+Emissary executor is now the only supported executor. If you are using other executors, e.g. docker, k8sapi, pns, and kubelet, you need to
+remove your `containerRuntimeExecutors` and `containerRuntimeExecutor` from your controller's configmap. If you have workflows that use different
+executors with the label `workflows.argoproj.io/container-runtime-executor`, this is no longer supported and will not be effective.
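+
+For example, a sketch of removing the old key from the default config map (this assumes the key is present; adjust the config map name and namespace to your install):
+
+```bash
+kubectl -n argo patch configmap workflow-controller-configmap --type=json \
+  -p='[{"op": "remove", "path": "/data/containerRuntimeExecutor"}]'
+```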
+
+### chore!: Remove dataflow pipelines from codebase. (#9071)
+
+You are affected if you are using [dataflow pipelines](https://github.com/argoproj-labs/argo-dataflow) in the UI or via the `/pipelines` endpoint.
+We no longer support dataflow pipelines and all relevant code has been removed.
+
+### feat!: Add entrypoint lookup. Fixes #8344
+
+Affected if:
+
+* Using the Emissary executor.
+* Used the `args` field for any entry in `images`.
+
+This PR automatically looks up the command and entrypoint. The implementation for config look-up was incorrect (it
+allowed you to specify `args` but not `entrypoint`). `args` has been removed to correct the behaviour.
+
+If you are incorrectly configured, the workflow controller will error on start-up.
+
+#### Actions
+
+You don't need to configure images that use v2 manifests anymore. You can just remove them (e.g. argoproj/argosay:v2):
+
+```bash
+% docker manifest inspect argoproj/argosay:v2
+...
+"schemaVersion": 2,
+...
+```
+
+For v1 manifests (e.g. docker/whalesay:latest):
+
+```bash
+% docker image inspect -f '{{.Config.Entrypoint}} {{.Config.Cmd}}' docker/whalesay:latest
+[] [/bin/bash]
+```
+
+```yaml
+images:
+ docker/whalesay:latest:
+ cmd: [/bin/bash]
+```
+
+### feat: Fail on invalid config. (#8295)
+
+The workflow controller will error on start-up if incorrectly configured, rather than silently ignoring
+mis-configuration.
+
+```text
+Failed to register watch for controller config map: error unmarshaling JSON: while decoding JSON: json: unknown field \"args\"
+```
+
+### feat: add indexes for improve archived workflow performance. (#8860)
+
+This PR adds indexes to archived workflow tables. This change may make the upgrade take a long time if you have a large table.
+
+### feat: enhance artifact visualization (#8655)
+
+For AWS users using S3: visualizing artifacts in the UI and downloading them now requires an additional "Action" to be configured in your S3 bucket policy: "ListBucket".
+
## Upgrading to v3.3
### [662a7295b](https://github.com/argoproj/argo-workflows/commit/662a7295b) feat: Replace `patch pod` with `create workflowtaskresult`. Fixes #3961 (#8000)
-The PR changes the permissions that can be used by a workflow to remove the `pod patch` permission.
+The PR changes the permissions that can be used by a workflow to remove the `pod patch` permission.
See [workflow RBAC](workflow-rbac.md) and [#8013](https://github.com/argoproj/argo-workflows/issues/3961).
@@ -38,7 +103,7 @@ See [#8013](https://github.com/argoproj/argo-workflows/issues/8013).
This PR removes the following configmap items -
-- executorImage (use executor.image in configmap instead)
+* executorImage (use executor.image in configmap instead)
e.g.
Workflow controller configmap similar to the following one given below won't be valid anymore:
@@ -67,7 +132,7 @@ This PR removes the following configmap items -
...
```
-- executorImagePullPolicy (use executor.imagePullPolicy in configmap instead)
+* executorImagePullPolicy (use executor.imagePullPolicy in configmap instead)
e.g.
Workflow controller configmap similar to the following one given below won't be valid anymore:
@@ -88,7 +153,7 @@ This PR removes the following configmap items -
...
```
-- executorResources (use executor.resources in configmap instead)
+* executorResources (use executor.resources in configmap instead)
e.g.
Workflow controller configmap similar to the following one given below won't be valid anymore:
@@ -123,7 +188,7 @@ This PR removes the following configmap items -
### [fce82d572](https://github.com/argoproj/argo-workflows/commit/fce82d5727b89cfe49e8e3568fff40725bd43734) feat: Remove pod workers (#7837)
-This PR removes pod workers from the code, the pod informer directly writes into the workflow queue. As a result the `--pod-workers` flag has been removed.
+This PR removes pod workers from the code, the pod informer directly writes into the workflow queue. As a result the `--pod-workers` flag has been removed.
### [93c11a24ff](https://github.com/argoproj/argo-workflows/commit/93c11a24ff06049c2197149acd787f702e5c1f9b) feat: Add TLS to Metrics and Telemetry servers (#7041)
@@ -146,7 +211,7 @@ HTTPArtifact without a scheme will now defaults to https instead of http
user need to explicitly include a http prefix if they want to retrieve HTTPArtifact through http
-### chore!: Remove the hidden flag `--verify` from `argo submit`.
+### chore!: Remove the hidden flag `--verify` from `argo submit`
The hidden flag `--verify` has been removed from `argo submit`. This is a internal testing flag we don't need anymore.
@@ -154,7 +219,7 @@ The hidden flag `--verify` has been removed from `argo submit`. This is a intern
### [e5b131a33](https://github.com/argoproj/argo-workflows/commit/e5b131a33) feat: Add template node to pod name. Fixes #1319 (#6712)
-This add the template name to the pod name, to make it easier to understand which pod ran which step. This behaviour can be reverted by setting `POD_NAMES=v1` on the workflow controller.
+This adds the template name to the pod name, to make it easier to understand which pod ran which step. This behaviour can be reverted by setting `POD_NAMES=v1` on the workflow controller.
### [be63efe89](https://github.com/argoproj/argo-workflows/commit/be63efe89) feat(executor)!: Change `argoexec` base image to alpine. Closes #5720 (#6006)
@@ -204,7 +269,7 @@ always play nicely with the `when` condition syntax (Goevaluate).
This can be resolved using a single quote in your when expression:
-```
+```yaml
when: "'{{inputs.parameters.should-print}}' != '2021-01-01'"
```
diff --git a/docs/use-cases/infrastructure-automation.md b/docs/use-cases/infrastructure-automation.md
index 080cd1cc0a5d..200019395d5a 100644
--- a/docs/use-cases/infrastructure-automation.md
+++ b/docs/use-cases/infrastructure-automation.md
@@ -7,7 +7,7 @@
## Videos
-* [Infrastructure Automation with Argo at InsideBoard - Alexandre Le Mao (Head of infrastructure / Lead DevOps, Insideboard)](https://www.youtube.com/watch?v=BochC4GKxbo&list=PLGHfqDpnXFXK4E8XzasScagiJk-8BPgva&index=2&utm_source=argo-docs)
+* [Infrastructure Automation with Argo at InsideBoard - Alexandre Le Mao (Head of infrastructure / Lead DevOps, InsideBoard)](https://www.youtube.com/watch?v=BochC4GKxbo&list=PLGHfqDpnXFXK4E8XzasScagiJk-8BPgva&index=2&utm_source=argo-docs)
* [Argo and KNative - David Breitgand (IBM) - showing 5G infra automation use case](https://youtu.be/dxX_Xnp2sX4?t=210&utm_source=argo-docs)
* [How New Relic Uses Argo Workflows - Fischer Jemison, Jared Welch (New Relic)](https://youtu.be/dxX_Xnp2sX4?t=1890&utm_source=argo-docs)
* [Building Kubernetes using Kubernetes - Tomas Valasek (SAP Concur)](https://youtu.be/TLTxv2F5WCQ?t=1742&utm_source=argo-docs)
diff --git a/docs/use-cases/machine-learning.md b/docs/use-cases/machine-learning.md
index 709204bbf61e..2af8c74fa97c 100644
--- a/docs/use-cases/machine-learning.md
+++ b/docs/use-cases/machine-learning.md
@@ -14,8 +14,8 @@
* [Building Medical Grade AI with Argo Workflows](https://youtu.be/4VPSktuM5Ow)
* [CI/CD for Machine Learning at MLB using Argo Workflows - Eric Meadows](https://youtu.be/fccWoYlwZKc?t=184&utm_source=argo-docs)
* [Dynamic, Event-Driven Machine Learning Pipelines with Argo Workflows](https://youtu.be/ei4r0a7eAV0)
-* [Machine Learning as Code: GitOps for ML with Kubeflow and ArgoCD](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU)
+* [Machine Learning as Code: GitOps for ML with Kubeflow and Argo CD](https://www.youtube.com/watch?v=VXrGp5er1ZE&t=0s&index=135&list=PLj6h78yzYM2PZf9eA7bhWnIh_mK1vyOfU)
* [Machine Learning with Argo and Ploomber](https://www.youtube.com/watch?v=FnpXyg-5W_c&list=PLGHfqDpnXFXK4E8XzasScagiJk-8BPgva&utm_source=argo-docs)
* [Making Complex R Forecast Applications Into Production Using Argo Workflows](https://www.youtube.com/watch?v=fPjztsUXHcg)
-* [MLOps at Tripadvisor: ML Models CI/CD Automation with Argo - Ang Gao (Principal Software Engineer, TripAdvisor)](https://www.youtube.com/watch?v=BochC4GKxbo&list=PLGHfqDpnXFXK4E8XzasScagiJk-8BPgva&index=2&utm_source=argo-docs)
+* [MLOps at TripAdvisor: ML Models CI/CD Automation with Argo - Ang Gao (Principal Software Engineer, TripAdvisor)](https://www.youtube.com/watch?v=BochC4GKxbo&list=PLGHfqDpnXFXK4E8XzasScagiJk-8BPgva&index=2&utm_source=argo-docs)
* [Towards Cloud-Native Distributed Machine Learning Pipelines at Scale](https://github.com/terrytangyuan/public-talks/tree/main/talks/towards-cloud-native-distributed-machine-learning-pipelines-at-scale-pydata-global-2021)
diff --git a/docs/use-cases/stream-processing.md b/docs/use-cases/stream-processing.md
index fa19fda79d09..ea13c1bb12ca 100644
--- a/docs/use-cases/stream-processing.md
+++ b/docs/use-cases/stream-processing.md
@@ -1,3 +1,3 @@
# Stream Processing
-Head to the [ArgoLabs Dataflow](https://github.com/argoproj-labs/argo-dataflow) docs.
\ No newline at end of file
+Head to the [ArgoLabs Dataflow](https://github.com/argoproj-labs/argo-dataflow) docs.
diff --git a/docs/use-cases/webhdfs.md b/docs/use-cases/webhdfs.md
new file mode 100644
index 000000000000..8acbfc80fc4a
--- /dev/null
+++ b/docs/use-cases/webhdfs.md
@@ -0,0 +1,45 @@
+# Using webHDFS protocol via HTTP artifacts
+
+webHDFS is a protocol that allows access to Hadoop or similar data storage via a unified REST API (see the [webHDFS documentation](https://hadoop.apache.org/docs/r3.3.3/hadoop-project-dist/hadoop-hdfs/WebHDFS.html)).
+
+## Input Artifacts
+
+In order to use the webHDFS protocol we will make use of HTTP artifacts, where the URL will be set to the webHDFS endpoint including the file path and all its query parameters. Suppose, our webHDFS endpoint is available under `https://mywebhdfsprovider.com/webhdfs/v1/` and we have a file `my-art.txt` located in a `data` folder, which we want to use as an input artifact. To construct the HTTP URL we need to append the file path to the base webHDFS endpoint and set the [OPEN operation](https://hadoop.apache.org/docs/r3.3.3/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Open_and_Read_a_File) in the HTTP URL parameter. This results in the following URL: `https://mywebhdfsprovider.com/webhdfs/v1/data/my-art.txt?op=OPEN`. This is all you need for webHDFS input artifacts to work! Now, when run, the workflow will download the specified webHDFS artifact into the given `path`. There are some additional fields that can be set for HTTP artifacts (e.g. HTTP headers), which you can find in the [full webHDFS example](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml).
+
+```yaml
+spec:
+ [...]
+ inputs:
+ artifacts:
+ - name: my-art
+ path: /my-artifact
+ http:
+ url: "https://mywebhdfsprovider.com/webhdfs/v1/file.txt?op=OPEN"
+```
+
+## Output Artifacts
+
+In order to declare a webHDFS output artifact, little change is necessary: We only need to change the webHDFS operation to the [CREATE operation](https://hadoop.apache.org/docs/r3.3.3/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Create_and_Write_to_a_File) and set the file path to where we want the output artifact to be stored. In this example we want to store the artifact under `outputs/newfile.txt`. We also supply the optional overwrite parameter `overwrite=true` to allow overwriting existing files in the webHDFS provider's data storage. If the `overwrite` flag is unset, the default behavior is used, which depends on the particular webHDFS provider. Below shows the example output artifact:
+
+```yaml
+spec:
+ [...]
+ outputs:
+ artifacts:
+ - name: my-art
+ path: /my-artifact
+ http:
+ url: "https://mywebhdfsprovider.com/webhdfs/v1/outputs/newfile.txt?op=CREATE&overwrite=true"
+```
+
+## Authentication
+
+The example above shows a minimal use case without any authentication. However, in a real-world scenario, you may want to use some form of authentication. Currently, Argo Workflows' HTTP artifacts support the following authentication mechanisms:
+
+- HTTP Basic Auth
+- OAuth2
+- Client Certificates
+
+Hence, the authentication mechanisms that can be used for webHDFS artifacts are limited to those supported by HTTP artifacts. Examples for the latter two authentication mechanisms can be found in the [webHDFS example file](https://github.com/argoproj/argo-workflows/blob/master/examples/webhdfs-input-output-artifacts.yaml).
+
+**Limitation**: Apache Hadoop itself only supports authentication via Kerberos SPNEGO and Hadoop delegation tokens (see ). While the former is currently not supported for HTTP artifacts, delegation tokens can be used by supplying the authentication token in the HTTP URL of the respective input or output artifact, as sketched below.
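+
+Below is a minimal, hypothetical sketch of that delegation-token approach. It reuses the input artifact from above; the `delegation` query parameter and the placeholder token value follow the webHDFS REST API and are assumptions for illustration, not something Argo validates:
+
+```yaml
+spec:
+  [...]
+  inputs:
+    artifacts:
+      - name: my-art
+        path: /my-artifact
+        http:
+          # placeholder token value; webHDFS expects the delegation token as a query parameter
+          url: "https://mywebhdfsprovider.com/webhdfs/v1/data/my-art.txt?op=OPEN&delegation=<token-string>"
+```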
diff --git a/docs/variables.md b/docs/variables.md
index ac6a92cdd787..da5892488aa8 100644
--- a/docs/variables.md
+++ b/docs/variables.md
@@ -25,10 +25,10 @@ spec:
container:
image: docker/whalesay
command: [ cowsay ]
- args: [ "{{inputs.parameters.message}}" ]
+ args: [ "{{inputs.parameters.message}}" ]
```
-The following variables are made available to reference various metadata of a workflow:
+The following variables are made available to reference various meta-data of a workflow:
## Template Tag Kinds
@@ -41,7 +41,7 @@ There are two kinds of template tag:
The tag is substituted with the variable that has a name the same as the tag.
-Simple tags **may** have whitespace between the brackets and variable.
+Simple tags **may** have white-space between the brackets and variable as seen below. However, there is a known issue where variables may fail to interpolate with white-space, so it is recommended to avoid using white-space until this issue is resolved. [Please report](https://github.com/argoproj/argo-workflows/issues/8960) unexpected behavior with reproducible examples.
```yaml
args: [ "{{ inputs.parameters.message }}" ]
@@ -62,19 +62,19 @@ indexing into the parameter or step map, e.g. `inputs.parameters['my-param']` or
Plain list:
-```
+```text
[1, 2]
```
Filter a list:
-```
+```text
filter([1, 2], { # > 1})
```
Map a list:
-```
+```text
map([1, 2], { # * 2 })
```
@@ -82,31 +82,31 @@ We provide some core functions:
Cast to int:
-```
+```text
asInt(inputs.parameters['my-int-param'])
```
Cast to float:
-```
+```text
asFloat(inputs.parameters['my-float-param'])
```
Cast to string:
-```
+```text
string(1)
```
Convert to a JSON string (needed for `withParam`):
-```
+```text
toJson([1, 2])
```
Extract data from JSON:
-```
+```text
jsonpath(inputs.parameters.json, '$.some.path')
```
@@ -114,7 +114,7 @@ You can also use [Sprig functions](http://masterminds.github.io/sprig/):
Trim a string:
-```
+```text
sprig.trim(inputs.parameters['my-string-param'])
```
@@ -135,30 +135,32 @@ returns `0`. Please review the Sprig documentation to understand which functions
| Variable | Description|
|----------|------------|
+| `steps.name` | Name of the step |
| `steps.<STEPNAME>.id` | unique id of container step |
| `steps.<STEPNAME>.ip` | IP address of a previous daemon container step |
| `steps.<STEPNAME>.status` | Phase status of any previous step |
| `steps.<STEPNAME>.exitCode` | Exit code of any previous script or container step |
-| `steps.<STEPNAME>.startedAt` | Timestamp when the step started |
-| `steps.<STEPNAME>.finishedAt` | Timestamp when the step finished |
+| `steps.<STEPNAME>.startedAt` | Time-stamp when the step started |
+| `steps.<STEPNAME>.finishedAt` | Time-stamp when the step finished |
| `steps.<STEPNAME>.outputs.result` | Output result of any previous container or script step |
-| `steps.<STEPNAME>.outputs.parameters` | When the previous step uses 'withItems' or 'withParams', this contains a JSON array of the output parameter maps of each invocation |
-| `steps.<STEPNAME>.outputs.parameters.<NAME>` | Output parameter of any previous step. When the previous step uses 'withItems' or 'withParams', this contains a JSON array of the output parameter values of each invocation |
+| `steps.<STEPNAME>.outputs.parameters` | When the previous step uses `withItems` or `withParams`, this contains a JSON array of the output parameter maps of each invocation |
+| `steps.<STEPNAME>.outputs.parameters.<NAME>` | Output parameter of any previous step. When the previous step uses `withItems` or `withParams`, this contains a JSON array of the output parameter values of each invocation |
| `steps.<STEPNAME>.outputs.artifacts.<NAME>` | Output artifact of any previous step |
### DAG Templates
| Variable | Description|
|----------|------------|
+| `tasks.name` | Name of the task |
| `tasks.<TASKNAME>.id` | unique id of container task |
| `tasks.<TASKNAME>.ip` | IP address of a previous daemon container task |
| `tasks.<TASKNAME>.status` | Phase status of any previous task |
| `tasks.<TASKNAME>.exitCode` | Exit code of any previous script or container task |
-| `tasks.<TASKNAME>.startedAt` | Timestamp when the task started |
-| `tasks.<TASKNAME>.finishedAt` | Timestamp when the task finished |
+| `tasks.<TASKNAME>.startedAt` | Time-stamp when the task started |
+| `tasks.<TASKNAME>.finishedAt` | Time-stamp when the task finished |
| `tasks.<TASKNAME>.outputs.result` | Output result of any previous container or script task |
-| `tasks.<TASKNAME>.outputs.parameters` | When the previous task uses 'withItems' or 'withParams', this contains a JSON array of the output parameter maps of each invocation |
-| `tasks.<TASKNAME>.outputs.parameters.<NAME>` | Output parameter of any previous task. When the previous task uses 'withItems' or 'withParams', this contains a JSON array of the output parameter values of each invocation |
+| `tasks.<TASKNAME>.outputs.parameters` | When the previous task uses `withItems` or `withParams`, this contains a JSON array of the output parameter maps of each invocation |
+| `tasks.<TASKNAME>.outputs.parameters.<NAME>` | Output parameter of any previous task. When the previous task uses `withItems` or `withParams`, this contains a JSON array of the output parameter values of each invocation |
| `tasks.<TASKNAME>.outputs.artifacts.<NAME>` | Output artifact of any previous task |
### HTTP Templates
@@ -177,7 +179,7 @@ Only available for `successCondition`
| `response.body` | Response body (`string`) |
| `response.headers` | Response headers (`map[string][]string`) |
-### RetryStrategy
+### `RetryStrategy`
When using the `expression` field within `retryStrategy`, special variables are available.
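+
+As a hedged illustration, a template-level `retryStrategy` might use such a variable in an `expression` like the following; the variable name `lastRetry.exitCode` and the step name are assumptions here, not part of this page:
+
+```yaml
+  - name: flaky-step
+    retryStrategy:
+      limit: "10"
+      # retry only while the last attempt exited with a code greater than 1
+      expression: "asInt(lastRetry.exitCode) > 1"
+    container:
+      image: python:alpine3.6
+      command: [python, -c]
+      args: ["import sys; sys.exit(2)"]
+```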
@@ -194,12 +196,12 @@ Note: These variables evaluate to a string type. If using advanced expressions,
| Variable | Description|
|----------|------------|
| `pod.name` | Pod name of the container/script |
-| `retries` | The retry number of the container/script if retryStrategy is specified |
+| `retries` | The retry number of the container/script if `retryStrategy` is specified |
| `inputs.artifacts.<NAME>.path` | Local path of the input artifact |
| `outputs.artifacts.<NAME>.path` | Local path of the output artifact |
| `outputs.parameters.<NAME>.path` | Local path of the output parameter |
-### Loops (withItems / withParam)
+### Loops (`withItems` / `withParam`)
| Variable | Description|
|----------|------------|
@@ -221,9 +223,9 @@ step.
| `outputs.result` | Output result of the metric-emitting template |
| `resourcesDuration.{cpu,memory}` | Resources duration **in seconds**. Must be one of `resourcesDuration.cpu` or `resourcesDuration.memory`, if available. For more info, see the [Resource Duration](resource-duration.md) doc.|
-### Realtime Metrics
+### Real-Time Metrics
-Some variables can be emitted in realtime (as opposed to just when the step/task completes). To emit these variables in
+Some variables can be emitted in real-time (as opposed to just when the step/task completes). To emit these variables in
real time, set `realtime: true` under `gauge` (note: only Gauge metrics allow for real time variable emission). Metrics
currently available for real time emission:
@@ -249,12 +251,12 @@ For `Template`-level metrics:
| `workflow.outputs.artifacts.<NAME>` | Global artifact in the workflow |
| `workflow.annotations.<NAME>` | Workflow annotations |
| `workflow.labels.<NAME>` | Workflow labels |
-| `workflow.creationTimestamp` | Workflow creation timestamp formatted in RFC 3339 (e.g. `2018-08-23T05:42:49Z`) |
-| `workflow.creationTimestamp.<STRFTIMECHAR>` | Creation timestamp formatted with a [strftime](http://strftime.org) format character. |
-| `workflow.creationTimestamp.RFC3339` | Creation timestamp formatted with in RFC 3339. |
+| `workflow.creationTimestamp` | Workflow creation time-stamp formatted in RFC 3339 (e.g. `2018-08-23T05:42:49Z`) |
+| `workflow.creationTimestamp.<STRFTIMECHAR>` | Creation time-stamp formatted with a [`strftime`](http://strftime.org) format character. |
+| `workflow.creationTimestamp.RFC3339` | Creation time-stamp formatted in RFC 3339. |
| `workflow.priority` | Workflow priority |
| `workflow.duration` | Workflow duration estimate, may differ from actual duration by a couple of seconds |
-| `workflow.scheduledTime` | Scheduled runtime formatted in RFC 3339 (only available for CronWorkflows) |
+| `workflow.scheduledTime` | Scheduled runtime formatted in RFC 3339 (only available for `CronWorkflow`) |
### Exit Handler
diff --git a/docs/versioning.md b/docs/versioning.md
deleted file mode 100644
index 1689b36d69e6..000000000000
--- a/docs/versioning.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Versioning
-
-Argo Workflows does not use Semantic Versioning, even though we have not introduced any breaking changes since v2.
-
-Breaking changes will be communicated in the release notes.
-
-See:
-
-* [Public API](public-api.md)
diff --git a/docs/walk-through/argo-cli.md b/docs/walk-through/argo-cli.md
new file mode 100644
index 000000000000..196e5871b18e
--- /dev/null
+++ b/docs/walk-through/argo-cli.md
@@ -0,0 +1,22 @@
+# Argo CLI
+
+In case you want to follow along with this walk-through, here's a quick overview of the most useful argo command line interface (CLI) commands.
+
+```bash
+argo submit hello-world.yaml # submit a workflow spec to Kubernetes
+argo list # list current workflows
+argo get hello-world-xxx # get info about a specific workflow
+argo logs hello-world-xxx # print the logs from a workflow
+argo delete hello-world-xxx # delete workflow
+```
+
+You can also run workflow specs directly using `kubectl`, but the Argo CLI provides syntax checking and nicer output, and requires less typing.
+
+```bash
+kubectl create -f hello-world.yaml
+kubectl get wf
+kubectl get wf hello-world-xxx
+kubectl get po --selector=workflows.argoproj.io/workflow=hello-world-xxx --show-all # similar to argo
+kubectl logs hello-world-xxx-yyy -c main
+kubectl delete wf hello-world-xxx
+```
diff --git a/docs/walk-through/artifacts.md b/docs/walk-through/artifacts.md
new file mode 100644
index 000000000000..bf8ee1ebfefc
--- /dev/null
+++ b/docs/walk-through/artifacts.md
@@ -0,0 +1,206 @@
+# Artifacts
+
+**Note:**
+You will need to configure an artifact repository to run this example.
+See [Configuring an artifact repository](https://argoproj.github.io/argo-workflows/configure-artifact-repository/).
+
+When running workflows, it is very common to have steps that generate or consume artifacts. Often, the output artifacts of one step may be used as input artifacts to a subsequent step.
+
+The workflow spec below consists of two steps that run in sequence. The first step, named `generate-artifact`, generates an artifact using the `whalesay` template. The second step, named `consume-artifact`, then consumes that artifact using the `print-message` template.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: artifact-passing-
+spec:
+ entrypoint: artifact-example
+ templates:
+ - name: artifact-example
+ steps:
+ - - name: generate-artifact
+ template: whalesay
+ - - name: consume-artifact
+ template: print-message
+ arguments:
+ artifacts:
+ # bind message to the hello-art artifact
+ # generated by the generate-artifact step
+ - name: message
+ from: "{{steps.generate-artifact.outputs.artifacts.hello-art}}"
+
+ - name: whalesay
+ container:
+ image: docker/whalesay:latest
+ command: [sh, -c]
+ args: ["cowsay hello world | tee /tmp/hello_world.txt"]
+ outputs:
+ artifacts:
+ # generate hello-art artifact from /tmp/hello_world.txt
+ # artifacts can be directories as well as files
+ - name: hello-art
+ path: /tmp/hello_world.txt
+
+ - name: print-message
+ inputs:
+ artifacts:
+ # unpack the message input artifact
+ # and put it at /tmp/message
+ - name: message
+ path: /tmp/message
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["cat /tmp/message"]
+```
+
+The `whalesay` template uses the `cowsay` command to generate a file named `/tmp/hello_world.txt`. It then `outputs` this file as an artifact named `hello-art`. In general, the artifact's `path` may be a directory rather than just a file. The `print-message` template takes an input artifact named `message`, unpacks it at the `path` named `/tmp/message`, and then prints the contents of `/tmp/message` using the `cat` command.
+The `artifact-example` template passes the `hello-art` artifact generated as an output of the `generate-artifact` step as the `message` input artifact to the `print-message` step. DAG templates use the tasks prefix to refer to another task, for example `{{tasks.generate-artifact.outputs.artifacts.hello-art}}`.
+
+Artifacts are packaged as tarballs and gzipped by default. You may customize this behavior by specifying an archive strategy, using the `archive` field. For example:
+
+```yaml
+<... snipped ...>
+ outputs:
+ artifacts:
+ # default behavior - tar+gzip default compression.
+ - name: hello-art-1
+ path: /tmp/hello_world.txt
+
+ # disable archiving entirely - upload the file / directory as is.
+ # this is useful when the container layout matches the desired target repository layout.
+ - name: hello-art-2
+ path: /tmp/hello_world.txt
+ archive:
+ none: {}
+
+ # customize the compression behavior (disabling it here).
+ # this is useful for files with varying compression benefits,
+ # e.g. disabling compression for a cached build workspace and large binaries,
+ # or increasing compression for "perfect" textual data - like a json/xml export of a large database.
+ - name: hello-art-3
+ path: /tmp/hello_world.txt
+ archive:
+ tar:
+ # no compression (also accepts the standard gzip 1 to 9 values)
+ compressionLevel: 0
+<... snipped ...>
+```
+
+## Artifact Garbage Collection
+
+As of version 3.4 you can configure your Workflow to automatically delete Artifacts that you don't need (presuming you're using S3 - other storage engines still need to be implemented).
+
+Artifacts can be deleted `OnWorkflowCompletion` or `OnWorkflowDeletion`. You can specify your Garbage Collection strategy on both the Workflow level and the Artifact level, so for example, you may have temporary artifacts that can be deleted right away but a final output that should be persisted:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: artifact-gc-
+spec:
+ entrypoint: main
+ artifactGC:
+ strategy: OnWorkflowDeletion # default Strategy set here applies to all Artifacts by default
+ templates:
+ - name: main
+ container:
+ image: argoproj/argosay:v2
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ echo "can throw this away" > /tmp/temporary-artifact.txt
+ echo "keep this" > /tmp/keep-this.txt
+ outputs:
+ artifacts:
+ - name: temporary-artifact
+ path: /tmp/temporary-artifact.txt
+ s3:
+ key: temporary-artifact.txt
+ - name: keep-this
+ path: /tmp/keep-this.txt
+ s3:
+ key: keep-this.txt
+ artifactGC:
+ strategy: Never # optional override for an Artifact
+```
+
+### Artifact Naming
+
+Consider parameterizing your S3 keys by `{{workflow.uid}}`, etc. (as shown in the sketch below) if there's a possibility that you could have concurrent Workflows of the same spec. This avoids a scenario in which the artifact from one Workflow is being deleted while the same S3 key is being generated for a different Workflow.
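+
+A minimal sketch of such a parameterized key (the artifact and file names here are hypothetical):
+
+```yaml
+  outputs:
+    artifacts:
+      - name: report
+        path: /tmp/report.txt
+        s3:
+          # include the Workflow UID in the key so concurrent Workflows do not collide
+          key: report-{{workflow.uid}}.txt
+```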
+
+### Service Accounts and Annotations
+
+Does your S3 bucket require you to run with a special Service Account or IAM Role Annotation? You can either use the same ones you use for creating artifacts or generate new ones that are specific for deletion permission. Generally users will probably just have a single Service Account or IAM Role to apply to all artifacts for the Workflow, but you can also customize on the artifact level if you need that:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: artifact-gc-
+spec:
+ entrypoint: main
+ artifactGC:
+ strategy: OnWorkflowDeletion
+ ##############################################################################################
+ # Workflow Level Service Account and Metadata
+ ##############################################################################################
+ serviceAccountName: my-sa
+ podMetadata:
+ annotations:
+ eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/my-iam-role
+ templates:
+ - name: main
+ container:
+ image: argoproj/argosay:v2
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ echo "can throw this away" > /tmp/temporary-artifact.txt
+ echo "keep this" > /tmp/keep-this.txt
+ outputs:
+ artifacts:
+ - name: temporary-artifact
+ path: /tmp/temporary-artifact.txt
+ s3:
+ key: temporary-artifact-{{workflow.uid}}.txt
+ artifactGC:
+ ####################################################################################
+ # Optional override capability
+ ####################################################################################
+ serviceAccountName: artifact-specific-sa
+ podMetadata:
+ annotations:
+ eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/artifact-specific-iam-role
+ - name: keep-this
+ path: /tmp/keep-this.txt
+ s3:
+ key: keep-this-{{workflow.uid}}.txt
+ artifactGC:
+ strategy: Never
+```
+
+If you do supply your own Service Account you will need to create a RoleBinding that binds it with the new `artifactgc` Role.
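+
+As a sketch, assuming the `my-sa` Service Account from the example above and the `artifactgc` Role mentioned here, such a RoleBinding might look like this:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: my-sa-artifactgc   # any name will do
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: artifactgc         # the Role referred to above
+subjects:
+  - kind: ServiceAccount
+    name: my-sa            # the Service Account supplied for artifact GC
+```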
+
+### What happens if Garbage Collection fails?
+
+If deletion of the artifact fails for some reason (other than the Artifact already having been deleted, which is not considered a failure), the Workflow's Status will be marked with a new Condition to indicate "Artifact GC Failure", a Kubernetes Event will be issued, and the Argo Server UI will also indicate the failure. In that case, if you need to delete the Workflow and its child CRD objects, you will need to patch the Workflow to remove the finalizer that prevents the deletion:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  finalizers:
+    - workflows.argoproj.io/artifact-gc
+```
+
+The finalizer can be deleted by doing:
+
+```sh
+kubectl patch workflow my-wf \
+ --type json \
+ --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'
+```
diff --git a/docs/walk-through/conditionals.md b/docs/walk-through/conditionals.md
new file mode 100644
index 000000000000..b07c04ec1437
--- /dev/null
+++ b/docs/walk-through/conditionals.md
@@ -0,0 +1,78 @@
+# Conditionals
+
+We also support conditional execution. The syntax is implemented by [`govaluate`](https://github.com/Knetic/govaluate), which offers support for complex syntax. See the example below:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: coinflip-
+spec:
+ entrypoint: coinflip
+ templates:
+ - name: coinflip
+ steps:
+ # flip a coin
+ - - name: flip-coin
+ template: flip-coin
+ # evaluate the result in parallel
+ - - name: heads
+ template: heads # call heads template if "heads"
+ when: "{{steps.flip-coin.outputs.result}} == heads"
+ - name: tails
+ template: tails # call tails template if "tails"
+ when: "{{steps.flip-coin.outputs.result}} == tails"
+ - - name: flip-again
+ template: flip-coin
+ - - name: complex-condition
+ template: heads-tails-or-twice-tails
+ # call heads template if first flip was "heads" and second was "tails" OR both were "tails"
+ when: >-
+ ( {{steps.flip-coin.outputs.result}} == heads &&
+ {{steps.flip-again.outputs.result}} == tails
+ ) ||
+ ( {{steps.flip-coin.outputs.result}} == tails &&
+ {{steps.flip-again.outputs.result}} == tails )
+ - name: heads-regex
+ template: heads # call heads template if ~ "hea"
+ when: "{{steps.flip-again.outputs.result}} =~ hea"
+ - name: tails-regex
+ template: tails # call heads template if ~ "tai"
+ when: "{{steps.flip-again.outputs.result}} =~ tai"
+
+ # Return heads or tails based on a random number
+ - name: flip-coin
+ script:
+ image: python:alpine3.6
+ command: [python]
+ source: |
+ import random
+ result = "heads" if random.randint(0,1) == 0 else "tails"
+ print(result)
+
+ - name: heads
+ container:
+ image: alpine:3.6
+ command: [sh, -c]
+ args: ["echo \"it was heads\""]
+
+ - name: tails
+ container:
+ image: alpine:3.6
+ command: [sh, -c]
+ args: ["echo \"it was tails\""]
+
+ - name: heads-tails-or-twice-tails
+ container:
+ image: alpine:3.6
+ command: [sh, -c]
+ args: ["echo \"it was heads the first flip and tails the second. Or it was two times tails.\""]
+```
+
+!!! note
+    If the parameter value contains quotes, it may invalidate the govaluate expression. To handle parameters with
+    quotes, embed an [expr](https://github.com/antonmedv/expr) expression in the conditional. For example:
+
+```yaml
+when: "{{=inputs.parameters['may-contain-quotes'] == 'example'}}"
+```
diff --git a/docs/walk-through/continuous-integration-examples.md b/docs/walk-through/continuous-integration-examples.md
new file mode 100644
index 000000000000..a5ba5c2bba4a
--- /dev/null
+++ b/docs/walk-through/continuous-integration-examples.md
@@ -0,0 +1,15 @@
+# Continuous Integration Examples
+
+Continuous integration is a popular application for workflows.
+
+Some quick examples of CI workflows:
+
+-
+-
+
+And a CI `WorkflowTemplate` example:
+
+-
+
+A more detailed example is , which allows you to
+create a local CI workflow for the purposes of learning.
diff --git a/docs/walk-through/custom-template-variable-reference.md b/docs/walk-through/custom-template-variable-reference.md
new file mode 100644
index 000000000000..0c4d0b2b1337
--- /dev/null
+++ b/docs/walk-through/custom-template-variable-reference.md
@@ -0,0 +1,40 @@
+# Custom Template Variable Reference
+
+In this example, we can see how to use other template language variable references (e.g., Jinja) in an Argo workflow template.
+Argo will validate and resolve only variables that start with an Argo-allowed prefix:
+{***"item", "steps", "inputs", "outputs", "workflow", "tasks"***}
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: custom-template-variable-
+spec:
+ entrypoint: hello-hello-hello
+
+ templates:
+ - name: hello-hello-hello
+ steps:
+ - - name: hello1
+ template: whalesay
+ arguments:
+ parameters: [{name: message, value: "hello1"}]
+ - - name: hello2a
+ template: whalesay
+ arguments:
+ parameters: [{name: message, value: "hello2a"}]
+ - name: hello2b
+ template: whalesay
+ arguments:
+ parameters: [{name: message, value: "hello2b"}]
+
+ - name: whalesay
+ inputs:
+ parameters:
+ - name: message
+ container:
+ image: docker/whalesay
+ command: [cowsay]
+ args: ["{{user.username}}"]
+
+```
diff --git a/docs/walk-through/daemon-containers.md b/docs/walk-through/daemon-containers.md
new file mode 100644
index 000000000000..ec147c7ffadd
--- /dev/null
+++ b/docs/walk-through/daemon-containers.md
@@ -0,0 +1,78 @@
+# Daemon Containers
+
+Argo workflows can start containers that run in the background (also known as `daemon containers`) while the workflow itself continues execution. Note that the daemons will be *automatically destroyed* when the workflow exits the template scope in which the daemon was invoked. Daemon containers are useful for starting up services to be tested or to be used in testing (e.g., fixtures). We also find it very useful when running large simulations to spin up a database as a daemon for collecting and organizing the results. The big advantage of daemons compared with sidecars is that their existence can persist across multiple steps or even the entire workflow.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: daemon-step-
+spec:
+ entrypoint: daemon-example
+ templates:
+ - name: daemon-example
+ steps:
+ - - name: influx
+ template: influxdb # start an influxdb as a daemon (see the influxdb template spec below)
+
+ - - name: init-database # initialize influxdb
+ template: influxdb-client
+ arguments:
+ parameters:
+ - name: cmd
+ value: curl -XPOST 'http://{{steps.influx.ip}}:8086/query' --data-urlencode "q=CREATE DATABASE mydb"
+
+ - - name: producer-1 # add entries to influxdb
+ template: influxdb-client
+ arguments:
+ parameters:
+ - name: cmd
+ value: for i in $(seq 1 20); do curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d "cpu,host=server01,region=uswest load=$i" ; sleep .5 ; done
+ - name: producer-2 # add entries to influxdb
+ template: influxdb-client
+ arguments:
+ parameters:
+ - name: cmd
+ value: for i in $(seq 1 20); do curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d "cpu,host=server02,region=uswest load=$((RANDOM % 100))" ; sleep .5 ; done
+ - name: producer-3 # add entries to influxdb
+ template: influxdb-client
+ arguments:
+ parameters:
+ - name: cmd
+ value: curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d 'cpu,host=server03,region=useast load=15.4'
+
+ - - name: consumer # consume entries from influxdb
+ template: influxdb-client
+ arguments:
+ parameters:
+ - name: cmd
+ value: curl --silent -G http://{{steps.influx.ip}}:8086/query?pretty=true --data-urlencode "db=mydb" --data-urlencode "q=SELECT * FROM cpu"
+
+ - name: influxdb
+ daemon: true # start influxdb as a daemon
+ retryStrategy:
+ limit: 10 # retry container if it fails
+ container:
+ image: influxdb:1.2
+ command:
+ - influxd
+ readinessProbe: # wait for readinessProbe to succeed
+ httpGet:
+ path: /ping
+ port: 8086
+
+ - name: influxdb-client
+ inputs:
+ parameters:
+ - name: cmd
+ container:
+ image: appropriate/curl:latest
+ command: ["/bin/sh", "-c"]
+ args: ["{{inputs.parameters.cmd}}"]
+ resources:
+ requests:
+ memory: 32Mi
+ cpu: 100m
+```
+
+Step templates use the `steps` prefix to refer to another step: for example `{{steps.influx.ip}}`. In DAG templates, the `tasks` prefix is used instead: for example `{{tasks.influx.ip}}`.
diff --git a/docs/walk-through/dag.md b/docs/walk-through/dag.md
new file mode 100644
index 000000000000..79f26aab4e60
--- /dev/null
+++ b/docs/walk-through/dag.md
@@ -0,0 +1,49 @@
+# DAG
+
+As an alternative to specifying sequences of steps, you can define the workflow as a directed-acyclic graph (DAG) by specifying the dependencies of each task. This can be simpler to maintain for complex workflows and allows for maximum parallelism when running tasks.
+
+In the following workflow, step `A` runs first, as it has no dependencies. Once `A` has finished, steps `B` and `C` run in parallel. Finally, once `B` and `C` have completed, step `D` can run.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: dag-diamond-
+spec:
+ entrypoint: diamond
+ templates:
+ - name: echo
+ inputs:
+ parameters:
+ - name: message
+ container:
+ image: alpine:3.7
+ command: [echo, "{{inputs.parameters.message}}"]
+ - name: diamond
+ dag:
+ tasks:
+ - name: A
+ template: echo
+ arguments:
+ parameters: [{name: message, value: A}]
+ - name: B
+ dependencies: [A]
+ template: echo
+ arguments:
+ parameters: [{name: message, value: B}]
+ - name: C
+ dependencies: [A]
+ template: echo
+ arguments:
+ parameters: [{name: message, value: C}]
+ - name: D
+ dependencies: [B, C]
+ template: echo
+ arguments:
+ parameters: [{name: message, value: D}]
+```
+
+The dependency graph may have [multiple roots](https://github.com/argoproj/argo-workflows/tree/master/examples/dag-multiroot.yaml). The templates called from a DAG or steps template can themselves be DAG or steps templates. This can allow for complex workflows to be split into manageable pieces.
+
+The DAG logic has a built-in `fail fast` feature that stops scheduling new steps as soon as it detects that one of the DAG nodes has failed. It then waits until all DAG nodes are completed before failing the DAG itself.
+The [FailFast](https://github.com/argoproj/argo-workflows/tree/master/examples/dag-disable-failFast.yaml) flag defaults to `true`. If set to `false`, it allows a DAG to run all of its branches to completion (either success or failure), regardless of failed outcomes of branches in the DAG. More info and an example of this feature can be found [here](https://github.com/argoproj/argo-workflows/issues/1442).
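+
+As a minimal sketch, disabling fail-fast is done on the `dag` itself; the snippet below reuses the `echo` template from the example above and is illustrative only:
+
+```yaml
+  - name: diamond
+    dag:
+      failFast: false   # run every branch to completion, even if one task fails
+      tasks:
+        - name: A
+          template: echo
+          arguments:
+            parameters: [{name: message, value: A}]
+        - name: B
+          dependencies: [A]
+          template: echo
+          arguments:
+            parameters: [{name: message, value: B}]
+```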
diff --git a/docs/walk-through/docker-in-docker-using-sidecars.md b/docs/walk-through/docker-in-docker-using-sidecars.md
new file mode 100644
index 000000000000..5ac9e41987fa
--- /dev/null
+++ b/docs/walk-through/docker-in-docker-using-sidecars.md
@@ -0,0 +1,35 @@
+# Docker-in-Docker Using Sidecars
+
+An application of sidecars is to implement Docker-in-Docker (DIND). DIND is useful when you want to run Docker commands from inside a container. For example, you may want to build and push a container image from inside your build container. In the following example, we use the `docker:dind` image to run a Docker daemon in a sidecar and give the main container access to the daemon.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: sidecar-dind-
+spec:
+ entrypoint: dind-sidecar-example
+ templates:
+ - name: dind-sidecar-example
+ container:
+ image: docker:19.03.13
+ command: [sh, -c]
+ args: ["until docker ps; do sleep 3; done; docker run --rm debian:latest cat /etc/os-release"]
+ env:
+ - name: DOCKER_HOST # the docker daemon can be accessed on the standard port on localhost
+ value: 127.0.0.1
+ sidecars:
+ - name: dind
+ image: docker:19.03.13-dind # Docker already provides an image for running a Docker daemon
+ command: [dockerd-entrypoint.sh]
+ env:
+ - name: DOCKER_TLS_CERTDIR # Docker TLS env config
+ value: ""
+ securityContext:
+ privileged: true # the Docker daemon can only run in a privileged container
+ # mirrorVolumeMounts will mount the same volumes specified in the main container
+ # to the sidecar (including artifacts), at the same mountPaths. This enables
+ # dind daemon to (partially) see the same filesystem as the main container in
+ # order to use features such as docker volume binding.
+ mirrorVolumeMounts: true
+```
diff --git a/docs/walk-through/exit-handlers.md b/docs/walk-through/exit-handlers.md
new file mode 100644
index 000000000000..51bf9e3ff720
--- /dev/null
+++ b/docs/walk-through/exit-handlers.md
@@ -0,0 +1,57 @@
+# Exit handlers
+
+An exit handler is a template that *always* executes, irrespective of success or failure, at the end of the workflow.
+
+Some common use cases of exit handlers are:
+
+- cleaning up after a workflow runs
+- sending notifications of workflow status (e.g., e-mail/Slack)
+- posting the pass/fail status to a web-hook result (e.g. GitHub build result)
+- resubmitting or submitting another workflow
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: exit-handlers-
+spec:
+ entrypoint: intentional-fail
+ onExit: exit-handler # invoke exit-handler template at end of the workflow
+ templates:
+ # primary workflow template
+ - name: intentional-fail
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo intentional failure; exit 1"]
+
+ # Exit handler templates
+ # After the completion of the entrypoint template, the status of the
+ # workflow is made available in the global variable {{workflow.status}}.
+ # {{workflow.status}} will be one of: Succeeded, Failed, Error
+ - name: exit-handler
+ steps:
+ - - name: notify
+ template: send-email
+ - name: celebrate
+ template: celebrate
+ when: "{{workflow.status}} == Succeeded"
+ - name: cry
+ template: cry
+ when: "{{workflow.status}} != Succeeded"
+ - name: send-email
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo send e-mail: {{workflow.name}} {{workflow.status}} {{workflow.duration}}"]
+ - name: celebrate
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo hooray!"]
+ - name: cry
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo boohoo!"]
+```
diff --git a/docs/walk-through/hardwired-artifacts.md b/docs/walk-through/hardwired-artifacts.md
new file mode 100644
index 000000000000..f973494ac49c
--- /dev/null
+++ b/docs/walk-through/hardwired-artifacts.md
@@ -0,0 +1,46 @@
+# Hardwired Artifacts
+
+With Argo, you can use any container image that you like to generate any kind of artifact. In practice, however, we find certain types of artifacts are very common, so there is built-in support for git, HTTP, GCS and S3 artifacts.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: hardwired-artifact-
+spec:
+ entrypoint: hardwired-artifact
+ templates:
+ - name: hardwired-artifact
+ inputs:
+ artifacts:
+ # Check out the master branch of the argo repo and place it at /src
+ # revision can be anything that git checkout accepts: branch, commit, tag, etc.
+ - name: argo-source
+ path: /src
+ git:
+ repo: https://github.com/argoproj/argo-workflows.git
+ revision: "master"
+ # Download kubectl 1.8.0 and place it at /bin/kubectl
+ - name: kubectl
+ path: /bin/kubectl
+ mode: 0755
+ http:
+ url: https://storage.googleapis.com/kubernetes-release/release/v1.8.0/bin/linux/amd64/kubectl
+ # Copy an s3 compatible artifact repository bucket (such as AWS, GCS and MinIO) and place it at /s3
+ - name: objects
+ path: /s3
+ s3:
+ endpoint: storage.googleapis.com
+ bucket: my-bucket-name
+ key: path/in/bucket
+ accessKeySecret:
+ name: my-s3-credentials
+ key: accessKey
+ secretKeySecret:
+ name: my-s3-credentials
+ key: secretKey
+ container:
+ image: debian
+ command: [sh, -c]
+ args: ["ls -l /src /bin/kubectl /s3"]
+```
diff --git a/docs/walk-through/hello-world.md b/docs/walk-through/hello-world.md
new file mode 100644
index 000000000000..beef6f120e53
--- /dev/null
+++ b/docs/walk-through/hello-world.md
@@ -0,0 +1,56 @@
+# Hello World
+
+Let's start by creating a very simple workflow template to echo "hello world" using the `docker/whalesay` container
+image from Docker Hub.
+
+You can run this directly from your shell with a simple docker command:
+
+```bash
+$ docker run docker/whalesay cowsay "hello world"
+ _____________
+< hello world >
+ -------------
+ \
+ \
+ \
+ ## .
+ ## ## ## ==
+ ## ## ## ## ===
+ /""""""""""""""""___/ ===
+ ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~
+ \______ o __/
+ \ \ __/
+ \____\______/
+
+
+Hello from Docker!
+This message shows that your installation appears to be working correctly.
+```
+
+Below, we run the same container on a Kubernetes cluster using an Argo workflow template. Be sure to read the comments
+as they provide useful explanations.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow # new type of k8s spec
+metadata:
+ generateName: hello-world- # name of the workflow spec
+spec:
+ entrypoint: whalesay # invoke the whalesay template
+ templates:
+ - name: whalesay # name of the template
+ container:
+ image: docker/whalesay
+ command: [ cowsay ]
+ args: [ "hello world" ]
+ resources: # limit the resources
+ limits:
+ memory: 32Mi
+ cpu: 100m
+```
+
+Argo adds a new `kind` of Kubernetes spec called a `Workflow`. The above spec contains a single `template`
+called `whalesay` which runs the `docker/whalesay` container and invokes `cowsay "hello world"`. The `whalesay` template
+is the `entrypoint` for the spec. The entrypoint specifies the initial template that should be invoked when the workflow
+spec is executed by Kubernetes. Being able to specify the entrypoint is more useful when there is more than one template
+defined in the Kubernetes workflow spec. :-)
diff --git a/docs/walk-through/index.md b/docs/walk-through/index.md
new file mode 100644
index 000000000000..f9d4f5ac4a59
--- /dev/null
+++ b/docs/walk-through/index.md
@@ -0,0 +1,16 @@
+# About
+
+Argo is implemented as a Kubernetes CRD (Custom Resource Definition). As a result, Argo workflows can be managed
+using `kubectl` and natively integrates with other Kubernetes services such as volumes, secrets, and RBAC. The new Argo
+software is light-weight and installs in under a minute, and provides complete workflow features including parameter
+substitution, artifacts, fixtures, loops and recursive workflows.
+
+Dozens of examples are available in
+the [examples directory](https://github.com/argoproj/argo-workflows/tree/master/examples) on GitHub.
+
+For a complete description of the Argo workflow spec, please refer
+to [the spec documentation](../fields.md#workflowspec).
+
+Progress through these examples in sequence to learn all the basics.
+
+Start with [Argo CLI](argo-cli.md).
diff --git a/docs/walk-through/kubernetes-resources.md b/docs/walk-through/kubernetes-resources.md
new file mode 100644
index 000000000000..0e65e1220068
--- /dev/null
+++ b/docs/walk-through/kubernetes-resources.md
@@ -0,0 +1,84 @@
+# Kubernetes Resources
+
+In many cases, you will want to manage Kubernetes resources from Argo workflows. The resource template allows you to create, delete, or update any type of Kubernetes resource.
+
+```yaml
+# in a workflow. The resource template type accepts any k8s manifest
+# (including CRDs) and can perform any `kubectl` action against it (e.g. create,
+# apply, delete, patch).
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: k8s-jobs-
+spec:
+ entrypoint: pi-tmpl
+ templates:
+ - name: pi-tmpl
+ resource: # indicates that this is a resource template
+ action: create # can be any kubectl action (e.g. create, delete, apply, patch)
+ # The successCondition and failureCondition are optional expressions.
+ # If failureCondition is true, the step is considered failed.
+ # If successCondition is true, the step is considered successful.
+ # They use kubernetes label selection syntax and can be applied against any field
+ # of the resource (not just labels). Multiple AND conditions can be represented by comma
+ # delimited expressions.
+ # For more details: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ successCondition: status.succeeded > 0
+ failureCondition: status.failed > 3
+ manifest: | #put your kubernetes spec here
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ generateName: pi-job-
+ spec:
+ template:
+ metadata:
+ name: pi
+ spec:
+ containers:
+ - name: pi
+ image: perl
+ command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
+ restartPolicy: Never
+ backoffLimit: 4
+```
+
+**Note:**
+Currently only a single resource can be managed by a resource template, so either a `generateName` or `name` must be provided in the resource's meta-data.
+
+Resources created in this way are independent of the workflow. If you want the resource to be deleted when the workflow is deleted then you can use [Kubernetes garbage collection](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/) with the workflow resource as an owner reference ([example](https://github.com/argoproj/argo-workflows/tree/master/examples/k8s-owner-reference.yaml)).
+
+You can also collect data about the resource in output parameters (see more at [k8s-jobs.yaml](https://github.com/argoproj/argo-workflows/tree/master/examples/k8s-jobs.yaml)), as sketched below.
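+
+A minimal sketch of such a template (the Job manifest and parameter names here are hypothetical) that captures fields of the created resource via `jsonPath` and `jqFilter`:
+
+```yaml
+  - name: create-job
+    resource:
+      action: create
+      successCondition: status.succeeded > 0
+      manifest: |
+        apiVersion: batch/v1
+        kind: Job
+        metadata:
+          generateName: sleep-job-
+        spec:
+          template:
+            spec:
+              containers:
+                - name: sleep
+                  image: alpine:latest
+                  command: [sh, -c, "sleep 5"]
+              restartPolicy: Never
+    outputs:
+      parameters:
+        - name: job-name
+          valueFrom:
+            jsonPath: '{.metadata.name}'   # name of the created Job
+        - name: job-status
+          valueFrom:
+            jqFilter: '.status'            # status object as a JSON string
+```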
+
+**Note:**
+When patching, the resource will accept another attribute, `mergeStrategy`, which can either be `strategic`, `merge`, or `json`. If this attribute is not supplied, it will default to `strategic`. Keep in mind that Custom Resources cannot be patched with `strategic`, so a different strategy must be chosen. For example, suppose you have the [`CronTab` CRD](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#create-a-customresourcedefinition) defined, and the following instance of a `CronTab`:
+
+```yaml
+apiVersion: "stable.example.com/v1"
+kind: CronTab
+spec:
+ cronSpec: "* * * * */5"
+ image: my-awesome-cron-image
+```
+
+This `CronTab` can be modified using the following Argo Workflow:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: k8s-patch-
+spec:
+ entrypoint: cront-tmpl
+ templates:
+ - name: cront-tmpl
+ resource:
+ action: patch
+ mergeStrategy: merge # Must be one of [strategic merge json]
+ manifest: |
+ apiVersion: "stable.example.com/v1"
+ kind: CronTab
+ spec:
+ cronSpec: "* * * * */10"
+ image: my-awesome-cron-image
+```
diff --git a/docs/walk-through/loops.md b/docs/walk-through/loops.md
new file mode 100644
index 000000000000..6c04a702aead
--- /dev/null
+++ b/docs/walk-through/loops.md
@@ -0,0 +1,161 @@
+# Loops
+
+When writing workflows, it is often very useful to be able to iterate over a set of inputs as shown in this example:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: loops-
+spec:
+ entrypoint: loop-example
+ templates:
+ - name: loop-example
+ steps:
+ - - name: print-message
+ template: whalesay
+ arguments:
+ parameters:
+ - name: message
+ value: "{{item}}"
+ withItems: # invoke whalesay once for each item in parallel
+ - hello world # item 1
+ - goodbye world # item 2
+
+ - name: whalesay
+ inputs:
+ parameters:
+ - name: message
+ container:
+ image: docker/whalesay:latest
+ command: [cowsay]
+ args: ["{{inputs.parameters.message}}"]
+```
+
+We can also iterate over sets of items:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: loops-maps-
+spec:
+ entrypoint: loop-map-example
+ templates:
+ - name: loop-map-example
+ steps:
+ - - name: test-linux
+ template: cat-os-release
+ arguments:
+ parameters:
+ - name: image
+ value: "{{item.image}}"
+ - name: tag
+ value: "{{item.tag}}"
+ withItems:
+ - { image: 'debian', tag: '9.1' } #item set 1
+ - { image: 'debian', tag: '8.9' } #item set 2
+ - { image: 'alpine', tag: '3.6' } #item set 3
+ - { image: 'ubuntu', tag: '17.10' } #item set 4
+
+ - name: cat-os-release
+ inputs:
+ parameters:
+ - name: image
+ - name: tag
+ container:
+ image: "{{inputs.parameters.image}}:{{inputs.parameters.tag}}"
+ command: [cat]
+ args: [/etc/os-release]
+```
+
+We can pass lists of items as parameters:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: loops-param-arg-
+spec:
+ entrypoint: loop-param-arg-example
+ arguments:
+ parameters:
+ - name: os-list # a list of items
+ value: |
+ [
+ { "image": "debian", "tag": "9.1" },
+ { "image": "debian", "tag": "8.9" },
+ { "image": "alpine", "tag": "3.6" },
+ { "image": "ubuntu", "tag": "17.10" }
+ ]
+
+ templates:
+ - name: loop-param-arg-example
+ inputs:
+ parameters:
+ - name: os-list
+ steps:
+ - - name: test-linux
+ template: cat-os-release
+ arguments:
+ parameters:
+ - name: image
+ value: "{{item.image}}"
+ - name: tag
+ value: "{{item.tag}}"
+ withParam: "{{inputs.parameters.os-list}}" # parameter specifies the list to iterate over
+
+ # This template is the same as in the previous example
+ - name: cat-os-release
+ inputs:
+ parameters:
+ - name: image
+ - name: tag
+ container:
+ image: "{{inputs.parameters.image}}:{{inputs.parameters.tag}}"
+ command: [cat]
+ args: [/etc/os-release]
+```
+
+We can even dynamically generate the list of items to iterate over!
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: loops-param-result-
+spec:
+ entrypoint: loop-param-result-example
+ templates:
+ - name: loop-param-result-example
+ steps:
+ - - name: generate
+ template: gen-number-list
+ # Iterate over the list of numbers generated by the generate step above
+ - - name: sleep
+ template: sleep-n-sec
+ arguments:
+ parameters:
+ - name: seconds
+ value: "{{item}}"
+ withParam: "{{steps.generate.outputs.result}}"
+
+ # Generate a list of numbers in JSON format
+ - name: gen-number-list
+ script:
+ image: python:alpine3.6
+ command: [python]
+ source: |
+ import json
+ import sys
+ json.dump([i for i in range(20, 31)], sys.stdout)
+
+ - name: sleep-n-sec
+ inputs:
+ parameters:
+ - name: seconds
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo sleeping for {{inputs.parameters.seconds}} seconds; sleep {{inputs.parameters.seconds}}; echo done"]
+```
diff --git a/docs/walk-through/output-parameters.md b/docs/walk-through/output-parameters.md
new file mode 100644
index 000000000000..3515dbd46d4c
--- /dev/null
+++ b/docs/walk-through/output-parameters.md
@@ -0,0 +1,61 @@
+# Output Parameters
+
+Output parameters provide a general mechanism to use the result of a step as a parameter (and not just as an artifact). This allows you to use the result from any type of step, not just a `script`, for conditional tests, loops, and arguments. Output parameters work similarly to `script result` except that the value of the output parameter is set to the contents of a generated file rather than the contents of `stdout`.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: output-parameter-
+spec:
+ entrypoint: output-parameter
+ templates:
+ - name: output-parameter
+ steps:
+ - - name: generate-parameter
+ template: whalesay
+ - - name: consume-parameter
+ template: print-message
+ arguments:
+ parameters:
+ # Pass the hello-param output from the generate-parameter step as the message input to print-message
+ - name: message
+ value: "{{steps.generate-parameter.outputs.parameters.hello-param}}"
+
+ - name: whalesay
+ container:
+ image: docker/whalesay:latest
+ command: [sh, -c]
+ args: ["echo -n hello world > /tmp/hello_world.txt"] # generate the content of hello_world.txt
+ outputs:
+ parameters:
+ - name: hello-param # name of output parameter
+ valueFrom:
+ path: /tmp/hello_world.txt # set the value of hello-param to the contents of this file (hello_world.txt)
+
+ - name: print-message
+ inputs:
+ parameters:
+ - name: message
+ container:
+ image: docker/whalesay:latest
+ command: [cowsay]
+ args: ["{{inputs.parameters.message}}"]
+```
+
+DAG templates use the tasks prefix to refer to another task, for example `{{tasks.generate-parameter.outputs.parameters.hello-param}}`.
+
+## `result` output parameter
+
+The `result` output parameter captures standard output.
+It is accessible from the `outputs` map: `outputs.result`.
+Only 256 kb of the standard output stream will be captured.
+
+### Scripts
+
+Outputs of a `script` are assigned to standard output and captured in the `result` parameter. More details [here](scripts-and-results.md).
+
+### Containers
+
+Container steps and tasks also have their standard output captured in the `result` parameter.
+Given a `task` called `log-int`, `result` would then be accessible as `{{ tasks.log-int.outputs.result }}`. If using [steps](steps.md), substitute `steps` for `tasks`: `{{ steps.log-int.outputs.result }}`.
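+
+As a minimal sketch (the workflow, template, and task names here are hypothetical), a DAG that consumes a container task's `result` could look like this:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: container-result-
+spec:
+  entrypoint: main
+  templates:
+    - name: main
+      dag:
+        tasks:
+          - name: log-int
+            template: echo-int
+          - name: print
+            dependencies: [log-int]
+            template: print-result
+            arguments:
+              parameters:
+                - name: result
+                  value: "{{tasks.log-int.outputs.result}}"
+    - name: echo-int
+      container:
+        image: alpine:latest
+        command: [sh, -c]
+        args: ["echo 42"]   # stdout is captured as the task's result
+    - name: print-result
+      inputs:
+        parameters:
+          - name: result
+      container:
+        image: alpine:latest
+        command: [sh, -c]
+        args: ["echo result was: {{inputs.parameters.result}}"]
+```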
diff --git a/docs/walk-through/parameters.md b/docs/walk-through/parameters.md
new file mode 100644
index 000000000000..0d835ca8adfd
--- /dev/null
+++ b/docs/walk-through/parameters.md
@@ -0,0 +1,91 @@
+# Parameters
+
+Let's look at a slightly more complex workflow spec with parameters.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: hello-world-parameters-
+spec:
+ # invoke the whalesay template with
+ # "hello world" as the argument
+ # to the message parameter
+ entrypoint: whalesay
+ arguments:
+ parameters:
+ - name: message
+ value: hello world
+
+ templates:
+ - name: whalesay
+ inputs:
+ parameters:
+ - name: message # parameter declaration
+ container:
+ # run cowsay with that message input parameter as args
+ image: docker/whalesay
+ command: [cowsay]
+ args: ["{{inputs.parameters.message}}"]
+```
+
+This time, the `whalesay` template takes an input parameter named `message` that is passed as the `args` to the `cowsay` command. In order to reference parameters (e.g., ``"{{inputs.parameters.message}}"``), the parameters must be enclosed in double quotes to escape the curly braces in YAML.
+
+The argo CLI provides a convenient way to override parameters used to invoke the entrypoint. For example, the following command would bind the `message` parameter to "goodbye world" instead of the default "hello world".
+
+```bash
+argo submit arguments-parameters.yaml -p message="goodbye world"
+```
+
+In the case of multiple parameters that can be overridden, the argo CLI provides a command to load parameter files in YAML or JSON format. Here is an example of that kind of parameter file:
+
+```yaml
+message: goodbye world
+```
+
+To run, use the following command:
+
+```bash
+argo submit arguments-parameters.yaml --parameter-file params.yaml
+```
+
+Command-line parameters can also be used to override the default entrypoint and invoke any template in the workflow spec. For example, if you add a new version of the `whalesay` template called `whalesay-caps` but you don't want to change the default entrypoint, you can invoke this from the command line as follows:
+
+```bash
+argo submit arguments-parameters.yaml --entrypoint whalesay-caps
+```
+
+By using a combination of the `--entrypoint` and `-p` parameters, you can call any template in the workflow spec with any parameter that you like.
+
+The values set in the `spec.arguments.parameters` are globally scoped and can be accessed via `{{workflow.parameters.parameter_name}}`. This can be useful to pass information to multiple steps in a workflow. For example, if you wanted to run your workflows with different logging levels that are set in the environment of each container, you could have a YAML file similar to this one:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: global-parameters-
+spec:
+ entrypoint: A
+ arguments:
+ parameters:
+ - name: log-level
+ value: INFO
+
+ templates:
+ - name: A
+ container:
+ image: containerA
+ env:
+ - name: LOG_LEVEL
+ value: "{{workflow.parameters.log-level}}"
+ command: [runA]
+ - name: B
+ container:
+ image: containerB
+ env:
+ - name: LOG_LEVEL
+ value: "{{workflow.parameters.log-level}}"
+ command: [runB]
+```
+
+In this workflow, both steps `A` and `B` would have the same log-level set to `INFO` and can easily be changed between workflow submissions using the `-p` flag.
diff --git a/docs/walk-through/recursion.md b/docs/walk-through/recursion.md
new file mode 100644
index 000000000000..759bd5f137ac
--- /dev/null
+++ b/docs/walk-through/recursion.md
@@ -0,0 +1,69 @@
+# Recursion
+
+Templates can recursively invoke each other! In this variation of the above coin-flip template, we continue to flip coins until it comes up heads.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: coinflip-recursive-
+spec:
+ entrypoint: coinflip
+ templates:
+ - name: coinflip
+ steps:
+ # flip a coin
+ - - name: flip-coin
+ template: flip-coin
+ # evaluate the result in parallel
+ - - name: heads
+ template: heads # call heads template if "heads"
+ when: "{{steps.flip-coin.outputs.result}} == heads"
+ - name: tails # keep flipping coins if "tails"
+ template: coinflip
+ when: "{{steps.flip-coin.outputs.result}} == tails"
+
+ - name: flip-coin
+ script:
+ image: python:alpine3.6
+ command: [python]
+ source: |
+ import random
+ result = "heads" if random.randint(0,1) == 0 else "tails"
+ print(result)
+
+ - name: heads
+ container:
+ image: alpine:3.6
+ command: [sh, -c]
+ args: ["echo \"it was heads\""]
+```
+
+Here's the result of a couple of runs of coin-flip for comparison.
+
+```bash
+argo get coinflip-recursive-tzcb5
+
+STEP PODNAME MESSAGE
+ ✔ coinflip-recursive-vhph5
+ ├───✔ flip-coin coinflip-recursive-vhph5-2123890397
+ └─┬─✔ heads coinflip-recursive-vhph5-128690560
+ └─○ tails
+
+STEP PODNAME MESSAGE
+ ✔ coinflip-recursive-tzcb5
+ ├───✔ flip-coin coinflip-recursive-tzcb5-322836820
+ └─┬─○ heads
+ └─✔ tails
+ ├───✔ flip-coin coinflip-recursive-tzcb5-1863890320
+ └─┬─○ heads
+ └─✔ tails
+ ├───✔ flip-coin coinflip-recursive-tzcb5-1768147140
+ └─┬─○ heads
+ └─✔ tails
+ ├───✔ flip-coin coinflip-recursive-tzcb5-4080411136
+ └─┬─✔ heads coinflip-recursive-tzcb5-4080323273
+ └─○ tails
+```
+
+In the first run, the coin immediately comes up heads and we stop. In the second run, the coin comes up tails three times before it finally comes up heads and we stop.
diff --git a/docs/walk-through/retrying-failed-or-errored-steps.md b/docs/walk-through/retrying-failed-or-errored-steps.md
new file mode 100644
index 000000000000..42795b3cf348
--- /dev/null
+++ b/docs/walk-through/retrying-failed-or-errored-steps.md
@@ -0,0 +1,36 @@
+# Retrying Failed or Errored Steps
+
+You can specify a `retryStrategy` that will dictate how failed or errored steps are retried:
+
+```yaml
+# This example demonstrates the use of retry back offs
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: retry-backoff-
+spec:
+ entrypoint: retry-backoff
+ templates:
+ - name: retry-backoff
+ retryStrategy:
+ limit: 10
+ retryPolicy: "Always"
+ backoff:
+ duration: "1" # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h", "1d"
+ factor: 2
+ maxDuration: "1m" # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h", "1d"
+ affinity:
+ nodeAntiAffinity: {}
+ container:
+ image: python:alpine3.6
+ command: ["python", -c]
+ # fail with a 66% probability
+ args: ["import random; import sys; exit_code = random.choice([0, 1, 1]); sys.exit(exit_code)"]
+```
+
+* `limit` is the maximum number of times the container will be retried.
+* `retryPolicy` specifies if a container will be retried on failure, error, both, or only transient errors (e.g. i/o or TLS handshake timeout). `Always` retries on both errors and failures. Also available: `OnFailure` (default), `OnError`, and `OnTransientError` (available after v3.0.0-rc2).
+* `backoff` is an exponential back-off.
+* `nodeAntiAffinity` prevents running steps on the same host. Current implementation allows only empty `nodeAntiAffinity` (i.e. `nodeAntiAffinity: {}`) and by default it uses label `kubernetes.io/hostname` as the selector.
+
+Providing an empty `retryStrategy` (i.e. `retryStrategy: {}`) will cause a container to retry until completion.
diff --git a/docs/walk-through/scripts-and-results.md b/docs/walk-through/scripts-and-results.md
new file mode 100644
index 000000000000..07cb9dc2fea6
--- /dev/null
+++ b/docs/walk-through/scripts-and-results.md
@@ -0,0 +1,60 @@
+# Scripts And Results
+
+Often, we just want a template that executes a script specified as a here-script (also known as a `here document`) in the workflow spec. This example shows how to do that:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: scripts-bash-
+spec:
+ entrypoint: bash-script-example
+ templates:
+ - name: bash-script-example
+ steps:
+ - - name: generate
+ template: gen-random-int-bash
+ - - name: print
+ template: print-message
+ arguments:
+ parameters:
+ - name: message
+ value: "{{steps.generate.outputs.result}}" # The result of the here-script
+
+ - name: gen-random-int-bash
+ script:
+ image: debian:9.4
+ command: [bash]
+ source: | # Contents of the here-script
+ cat /dev/urandom | od -N2 -An -i | awk -v f=1 -v r=100 '{printf "%i\n", f + r * $1 / 65536}'
+
+ - name: gen-random-int-python
+ script:
+ image: python:alpine3.6
+ command: [python]
+ source: |
+ import random
+ i = random.randint(1, 100)
+ print(i)
+
+ - name: gen-random-int-javascript
+ script:
+ image: node:9.1-alpine
+ command: [node]
+ source: |
+ var rand = Math.floor(Math.random() * 100);
+ console.log(rand);
+
+ - name: print-message
+ inputs:
+ parameters:
+ - name: message
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo result was: {{inputs.parameters.message}}"]
+```
+
+The `script` keyword allows the specification of the script body using the `source` tag. This creates a temporary file containing the script body and then passes the name of the temporary file as the final parameter to `command`, which should be an interpreter that executes the script body.
+
+The use of the `script` feature also assigns the standard output of running the script to a special output parameter named `result`. This allows you to use the result of running the script itself in the rest of the workflow spec. In this example, the result is simply echoed by the print-message template.
diff --git a/docs/walk-through/secrets.md b/docs/walk-through/secrets.md
new file mode 100644
index 000000000000..e5f67e29a79e
--- /dev/null
+++ b/docs/walk-through/secrets.md
@@ -0,0 +1,40 @@
+# Secrets
+
+Argo supports the same secrets syntax and mechanisms as Kubernetes Pod specs, which allows access to secrets as environment variables or volume mounts. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/) for more information.
+
+```yaml
+# To run this example, first create the secret by running:
+# kubectl create secret generic my-secret --from-literal=mypassword=S00perS3cretPa55word
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: secret-example-
+spec:
+ entrypoint: whalesay
+ # To access secrets as files, add a volume entry in spec.volumes[] and
+ # then in the container template spec, add a mount using volumeMounts.
+ volumes:
+ - name: my-secret-vol
+ secret:
+ secretName: my-secret # name of an existing k8s secret
+ templates:
+ - name: whalesay
+ container:
+ image: alpine:3.7
+ command: [sh, -c]
+ args: ['
+ echo "secret from env: $MYSECRETPASSWORD";
+ echo "secret from file: `cat /secret/mountpath/mypassword`"
+ ']
+ # To access secrets as environment variables, use the k8s valueFrom and
+ # secretKeyRef constructs.
+ env:
+ - name: MYSECRETPASSWORD # name of env var
+ valueFrom:
+ secretKeyRef:
+ name: my-secret # name of an existing k8s secret
+ key: mypassword # 'key' subcomponent of the secret
+ volumeMounts:
+ - name: my-secret-vol # mount file containing secret at /secret/mountpath
+ mountPath: "/secret/mountpath"
+```
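+
+If you only need a single key from a secret as a file, Kubernetes also lets you project individual keys with `items`. A minimal sketch of the volume entry (the projected file name is illustrative):
+
+```yaml
+  volumes:
+  - name: my-secret-vol
+    secret:
+      secretName: my-secret
+      items:
+      - key: mypassword            # only project this key
+        path: mypassword.txt       # appears as /secret/mountpath/mypassword.txt
+```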
diff --git a/docs/walk-through/sidecars.md b/docs/walk-through/sidecars.md
new file mode 100644
index 000000000000..feebff026966
--- /dev/null
+++ b/docs/walk-through/sidecars.md
@@ -0,0 +1,26 @@
+# Sidecars
+
+A sidecar is another container that executes concurrently in the same pod as the main container and is useful in creating multi-container pods.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: sidecar-nginx-
+spec:
+ entrypoint: sidecar-nginx-example
+ templates:
+ - name: sidecar-nginx-example
+ container:
+ image: appropriate/curl
+ command: [sh, -c]
+ # Try to read from nginx web server until it comes up
+ args: ["until `curl -G 'http://127.0.0.1/' >& /tmp/out`; do echo sleep && sleep 1; done && cat /tmp/out"]
+ # Create a simple nginx web server
+ sidecars:
+ - name: nginx
+ image: nginx:1.13
+ command: [nginx, -g, daemon off;]
+```
+
+In the above example, we create a sidecar container that runs Nginx as a simple web server. The order in which containers come up is random, so in this example the main container polls the Nginx container until it is ready to service requests. This is a good design pattern for multi-container systems: always wait for any services you need to come up before running your main code.
diff --git a/docs/walk-through/steps.md b/docs/walk-through/steps.md
new file mode 100644
index 000000000000..1815c794fb71
--- /dev/null
+++ b/docs/walk-through/steps.md
@@ -0,0 +1,57 @@
+# Steps
+
+In this example, we'll see how to create multi-step workflows, how to define more than one template in a workflow spec, and how to create nested workflows. Be sure to read the comments as they provide useful explanations.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: steps-
+spec:
+ entrypoint: hello-hello-hello
+
+ # This spec contains two templates: hello-hello-hello and whalesay
+ templates:
+ - name: hello-hello-hello
+ # Instead of just running a container
+ # This template has a sequence of steps
+ steps:
+ - - name: hello1 # hello1 is run before the following steps
+ template: whalesay
+ arguments:
+ parameters:
+ - name: message
+ value: "hello1"
+ - - name: hello2a # double dash => run after previous step
+ template: whalesay
+ arguments:
+ parameters:
+ - name: message
+ value: "hello2a"
+ - name: hello2b # single dash => run in parallel with previous step
+ template: whalesay
+ arguments:
+ parameters:
+ - name: message
+ value: "hello2b"
+
+ # This is the same template as from the previous example
+ - name: whalesay
+ inputs:
+ parameters:
+ - name: message
+ container:
+ image: docker/whalesay
+ command: [cowsay]
+ args: ["{{inputs.parameters.message}}"]
+```
+
+The above workflow spec prints three different flavors of "hello". The `hello-hello-hello` template consists of three `steps`. The first step named `hello1` will be run in sequence whereas the next two steps named `hello2a` and `hello2b` will be run in parallel with each other. Using the `argo get` command, we can graphically display the execution history of this workflow spec, which shows that the steps named `hello2a` and `hello2b` ran in parallel with each other.
+
+```bash
+STEP TEMPLATE PODNAME DURATION MESSAGE
+ ✔ steps-z2zdn hello-hello-hello
+ ├───✔ hello1 whalesay steps-z2zdn-27420706 2s
+ └─┬─✔ hello2a whalesay steps-z2zdn-2006760091 3s
+ └─✔ hello2b whalesay steps-z2zdn-2023537710 3s
+```
diff --git a/docs/walk-through/suspending.md b/docs/walk-through/suspending.md
new file mode 100644
index 000000000000..53b957c108aa
--- /dev/null
+++ b/docs/walk-through/suspending.md
@@ -0,0 +1,50 @@
+# Suspending
+
+Workflows can be suspended by
+
+```bash
+argo suspend WORKFLOW
+```
+
+Or by specifying a `suspend` step on the workflow:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: suspend-template-
+spec:
+ entrypoint: suspend
+ templates:
+ - name: suspend
+ steps:
+ - - name: build
+ template: whalesay
+ - - name: approve
+ template: approve
+ - - name: delay
+ template: delay
+ - - name: release
+ template: whalesay
+
+ - name: approve
+ suspend: {}
+
+ - name: delay
+ suspend:
+ duration: "20" # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h", "1d"
+
+ - name: whalesay
+ container:
+ image: docker/whalesay
+ command: [cowsay]
+ args: ["hello world"]
+```
+
+Once suspended, a Workflow will not schedule any new steps until it is resumed. It can be resumed manually by
+
+```bash
+argo resume WORKFLOW
+```
+
+Or automatically with a `duration` limit, as in the example above.
diff --git a/docs/walk-through/the-structure-of-workflow-specs.md b/docs/walk-through/the-structure-of-workflow-specs.md
new file mode 100644
index 000000000000..2fa0129d8e6a
--- /dev/null
+++ b/docs/walk-through/the-structure-of-workflow-specs.md
@@ -0,0 +1,19 @@
+# The Structure of Workflow Specs
+
+We now know enough about the basic components of a workflow spec. To review its basic structure:
+
+- Kubernetes header including meta-data
+- Spec body
+ - Entrypoint invocation with optional arguments
+ - List of template definitions
+
+- For each template definition
+ - Name of the template
+ - Optionally a list of inputs
+ - Optionally a list of outputs
+ - Container invocation (leaf template) or a list of steps
+ - For each step, a template invocation
+
+To summarize, workflow specs are composed of a set of Argo templates where each template consists of an optional input section, an optional output section and either a container invocation or a list of steps where each step invokes another template.
+
+Note that the container section of the workflow spec will accept the same options as the container section of a pod spec, including but not limited to environment variables, secrets, and volume mounts. The same applies to volume claims and volumes.
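+
+A minimal skeleton illustrating this structure (all names and values are illustrative):
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow                    # Kubernetes header
+metadata:
+  generateName: structure-        # metadata
+spec:
+  entrypoint: main                # entrypoint invocation
+  arguments:                      # optional arguments
+    parameters:
+    - name: message
+      value: hello
+  templates:
+  - name: main                    # a template that invokes another template via steps
+    steps:
+    - - name: say
+        template: leaf
+        arguments:
+          parameters:
+          - name: message
+            value: "{{workflow.parameters.message}}"
+  - name: leaf                    # a leaf template: container invocation
+    inputs:                       # optional inputs
+      parameters:
+      - name: message
+    container:
+      image: alpine:latest
+      command: [sh, -c]
+      args: ["echo {{inputs.parameters.message}}"]
+```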
diff --git a/docs/walk-through/timeouts.md b/docs/walk-through/timeouts.md
new file mode 100644
index 000000000000..32076f147ca0
--- /dev/null
+++ b/docs/walk-through/timeouts.md
@@ -0,0 +1,20 @@
+# Timeouts
+
+To limit the elapsed time for a workflow, you can set the field `activeDeadlineSeconds`.
+
+```yaml
+# To enforce a timeout for a container template, specify a value for activeDeadlineSeconds.
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: timeouts-
+spec:
+ entrypoint: sleep
+ templates:
+ - name: sleep
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo sleeping for 1m; sleep 60; echo done"]
+ activeDeadlineSeconds: 10 # terminate container template after 10 seconds
+```
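+
+The deadline can also be applied to the whole workflow rather than a single template by setting `activeDeadlineSeconds` at the workflow `spec` level. A minimal sketch:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: timeouts-workflow-
+spec:
+  activeDeadlineSeconds: 10  # terminate the entire workflow after 10 seconds
+  entrypoint: sleep
+  templates:
+  - name: sleep
+    container:
+      image: alpine:latest
+      command: [sh, -c]
+      args: ["echo sleeping for 1m; sleep 60; echo done"]
+```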
diff --git a/docs/walk-through/volumes.md b/docs/walk-through/volumes.md
new file mode 100644
index 000000000000..87d3224fffae
--- /dev/null
+++ b/docs/walk-through/volumes.md
@@ -0,0 +1,196 @@
+# Volumes
+
+The following example dynamically creates a volume and then uses it in a two-step workflow.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: volumes-pvc-
+spec:
+ entrypoint: volumes-pvc-example
+ volumeClaimTemplates: # define volume, same syntax as k8s Pod spec
+ - metadata:
+ name: workdir # name of volume claim
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 1Gi # Gi => 1024 * 1024 * 1024
+
+ templates:
+ - name: volumes-pvc-example
+ steps:
+ - - name: generate
+ template: whalesay
+ - - name: print
+ template: print-message
+
+ - name: whalesay
+ container:
+ image: docker/whalesay:latest
+ command: [sh, -c]
+ args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
+ # Mount workdir volume at /mnt/vol before invoking docker/whalesay
+ volumeMounts: # same syntax as k8s Pod spec
+ - name: workdir
+ mountPath: /mnt/vol
+
+ - name: print-message
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
+      # Mount workdir volume at /mnt/vol to read the message written by the previous step
+ volumeMounts: # same syntax as k8s Pod spec
+ - name: workdir
+ mountPath: /mnt/vol
+
+```
+
+Volumes are a very useful way to move large amounts of data from one step in a workflow to another. Depending on the system, some volumes may be accessible concurrently from multiple steps.
+
+In some cases, you want to access an already existing volume rather than creating/destroying one dynamically.
+
+```yaml
+# Define Kubernetes PVC
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: my-existing-volume
+spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 1Gi
+
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: volumes-existing-
+spec:
+ entrypoint: volumes-existing-example
+ volumes:
+ # Pass my-existing-volume as an argument to the volumes-existing-example template
+ # Same syntax as k8s Pod spec
+ - name: workdir
+ persistentVolumeClaim:
+ claimName: my-existing-volume
+
+ templates:
+ - name: volumes-existing-example
+ steps:
+ - - name: generate
+ template: whalesay
+ - - name: print
+ template: print-message
+
+ - name: whalesay
+ container:
+ image: docker/whalesay:latest
+ command: [sh, -c]
+ args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
+ volumeMounts:
+ - name: workdir
+ mountPath: /mnt/vol
+
+ - name: print-message
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
+ volumeMounts:
+ - name: workdir
+ mountPath: /mnt/vol
+```
+
+It's also possible to declare existing volumes at the template level, instead of the workflow level.
+Workflows can generate volumes using a `resource` step.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: template-level-volume-
+spec:
+ entrypoint: generate-and-use-volume
+ templates:
+ - name: generate-and-use-volume
+ steps:
+ - - name: generate-volume
+ template: generate-volume
+ arguments:
+ parameters:
+ - name: pvc-size
+ # In a real-world example, this could be generated by a previous workflow step.
+ value: '1Gi'
+ - - name: generate
+ template: whalesay
+ arguments:
+ parameters:
+ - name: pvc-name
+ value: '{{steps.generate-volume.outputs.parameters.pvc-name}}'
+ - - name: print
+ template: print-message
+ arguments:
+ parameters:
+ - name: pvc-name
+ value: '{{steps.generate-volume.outputs.parameters.pvc-name}}'
+
+ - name: generate-volume
+ inputs:
+ parameters:
+ - name: pvc-size
+ resource:
+ action: create
+ setOwnerReference: true
+ manifest: |
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ generateName: pvc-example-
+ spec:
+ accessModes: ['ReadWriteOnce', 'ReadOnlyMany']
+ resources:
+ requests:
+ storage: '{{inputs.parameters.pvc-size}}'
+ outputs:
+ parameters:
+ - name: pvc-name
+ valueFrom:
+ jsonPath: '{.metadata.name}'
+
+ - name: whalesay
+ inputs:
+ parameters:
+ - name: pvc-name
+ volumes:
+ - name: workdir
+ persistentVolumeClaim:
+ claimName: '{{inputs.parameters.pvc-name}}'
+ container:
+ image: docker/whalesay:latest
+ command: [sh, -c]
+ args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
+ volumeMounts:
+ - name: workdir
+ mountPath: /mnt/vol
+
+ - name: print-message
+ inputs:
+ parameters:
+ - name: pvc-name
+ volumes:
+ - name: workdir
+ persistentVolumeClaim:
+ claimName: '{{inputs.parameters.pvc-name}}'
+ container:
+ image: alpine:latest
+ command: [sh, -c]
+ args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
+ volumeMounts:
+ - name: workdir
+ mountPath: /mnt/vol
+
+```
diff --git a/docs/webhooks.md b/docs/webhooks.md
index 42d8418ab096..01947ab1f11f 100644
--- a/docs/webhooks.md
+++ b/docs/webhooks.md
@@ -1,6 +1,5 @@
# Webhooks
-
> v2.11 and after
Many clients can send events via the [events](events.md) API endpoint using a standard authorization header. However, for clients that are unable to do so (e.g. because they use signature verification as proof of origin), additional configuration is required.
@@ -8,15 +7,14 @@ Many clients can send events via the [events](events.md) API endpoint using a st
In the namespace that will receive the event, create [access token](access-token.md) resources for your client:
* A role with permissions to get workflow templates and to create a workflow: [example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start/base/webhooks/submit-workflow-template-role.yaml)
-* A service account for the client: [example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start/base/webhooks/github.com-sa.yaml).
+* A service account for the client: [example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start/base/webhooks/github.com-sa.yaml).
* A binding of the account to the role: [example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start/base/webhooks/github.com-rolebinding.yaml)
Additionally create:
-* A secret named "argo-workflows-webhook-clients" listing the service accounts: [example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start/base/webhooks/argo-workflows-webhook-clients-secret.yaml)
-
-The secret "argo-workflows-webhook-clients" tells Argo:
+* A secret named `argo-workflows-webhook-clients` listing the service accounts: [example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start/base/webhooks/argo-workflows-webhook-clients-secret.yaml)
-* What type of webhook the account can be used for, e.g. "github"
-* What "secret" that webhook is configured for, e.g. in your [Github settings page](https://github.com/alexec/argo/settings/hooks)
+The secret `argo-workflows-webhook-clients` tells Argo:
+* What type of webhook the account can be used for, e.g. `github`.
+* What "secret" that webhook is configured for, e.g. in your GitHub settings page.
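+
+For reference, a sketch of what such a secret might look like, based on the linked example (the key and values are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: argo-workflows-webhook-clients
+stringData:
+  github.com: |        # one entry per client service account
+    type: github       # the webhook type
+    secret: "shh!"     # the webhook "secret" configured in GitHub
+```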
diff --git a/docs/widgets.md b/docs/widgets.md
index b5ff4d5bfaf0..475c57af19b7 100644
--- a/docs/widgets.md
+++ b/docs/widgets.md
@@ -2,7 +2,7 @@
> v3.0 and after
-Widgets are intended to be embedded into other applications using iframes. This is may not work with your configuration. You may need to:
+Widgets are intended to be embedded into other applications using inline frames (`iframe`). This may not work with your configuration. You may need to:
* Run the Argo Server with an account that can read workflows. That can be done using `--auth-mode=server` and configuring the `argo-server` service account.
* Run the Argo Server with `--x-frame-options=SAMEORIGIN` or `--x-frame-options=`.
diff --git a/docs/windows.md b/docs/windows.md
index 7b124d0aa119..1086b8015d5a 100644
--- a/docs/windows.md
+++ b/docs/windows.md
@@ -3,9 +3,10 @@
The Argo server and the workflow controller currently only run on Linux. The workflow executor however also runs on Windows nodes, meaning you can use Windows containers inside your workflows! Here are the steps to get started.
## Requirements
+
* Kubernetes 1.14 or later, supporting Windows nodes
* Hybrid cluster containing Linux and Windows nodes like described in the [Kubernetes docs](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/)
-* Argo configured and running like described [here](quick-start.md)
+* Argo configured and running like described [here](quick-start.md)
## Schedule workflows with Windows containers
@@ -29,7 +30,8 @@ spec:
```
You can run this example and get the logs:
-```
+
+```bash
$ argo submit --watch https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-windows.yaml
$ argo logs hello-windows-s9kk5
hello-windows-s9kk5: "Hello from Windows Container!"
@@ -37,9 +39,10 @@ hello-windows-s9kk5: "Hello from Windows Container!"
## Schedule hybrid workflows
-You can also run different steps on different host OSs. This can for example be very helpful when you need to compile your application on Windows and Linux.
+You can also run different steps on different host operating systems. This can be very helpful, for example, when you need to compile your application on Windows and Linux.
An example workflow can look like the following:
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -69,11 +72,11 @@ spec:
image: alpine
command: [echo]
args: ["Hello from Linux Container!"]
-
```
Again, you can run this example and get the logs:
-```
+
+```bash
$ argo submit --watch https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-hybrid.yaml
$ argo logs hello-hybrid-plqpp
hello-hybrid-plqpp-1977432187: "Hello from Windows Container!"
@@ -105,16 +108,16 @@ Remember that [volume mounts on Windows can only target a directory](https://kub
## Limitations
-- Sharing process namespaces [doesn't work on Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#v1-pod) so you can't use the Process Namespace Sharing (pns) workflow executor.
-- The argoexec Windows container is built using [nanoserver:1809](https://github.com/argoproj/argo-workflows/blob/b18b9920f678f420552864eccf3d4b98f3604cfa/Dockerfile.windows#L28) as the base image. Running a newer windows version (e.g. 1909) is currently [not confirmed to be working](https://github.com/argoproj/argo-workflows/issues/5376). If this is required, you need to build the argoexec container yourself by first adjusting the base image.
+* Sharing process namespaces [doesn't work on Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#v1-pod) so you can't use the Process Namespace Sharing (PNS) workflow executor.
+* The executor Windows container is built using [Nano Server](https://github.com/argoproj/argo-workflows/blob/b18b9920f678f420552864eccf3d4b98f3604cfa/Dockerfile.windows#L28) as the base image. Running a newer Windows version (e.g. 1909) is currently [not confirmed to be working](https://github.com/argoproj/argo-workflows/issues/5376). If this is required, you need to build the executor container yourself by first adjusting the base image.
## Building the workflow executor image for Windows
To build the workflow executor image for Windows you need a Windows machine running Windows Server 2019 with Docker installed like described [in the docs](https://docs.docker.com/ee/docker-ee/windows/docker-ee/#install-docker-engine---enterprise).
-You then clone the project and run the Docker build with the Dockerfile for Windows and `argoexec` as a target:
+You then clone the project and run the Docker build with the `Dockerfile` for Windows and `argoexec` as a target:
-```
+```bash
git clone https://github.com/argoproj/argo-workflows.git
cd argo
docker build -t myargoexec -f .\Dockerfile.windows --target argoexec .
diff --git a/docs/work-avoidance.md b/docs/work-avoidance.md
index 083573ef8524..a2d99a6bb5ec 100644
--- a/docs/work-avoidance.md
+++ b/docs/work-avoidance.md
@@ -1,7 +1,5 @@
# Work Avoidance
-
-
> v2.9 and after
You can make workflows faster and more robust by employing **work avoidance**. A workflow that utilizes this is simply a workflow containing steps that do not run if the work has already been done. The simplest way to do this is to use **marker files**.
@@ -9,9 +7,9 @@ You can make workflows faster and more robust by employing **work avoidance**. A
Use cases:
* An expensive step appears across multiple workflows - you want to avoid repeating them.
-* A workflow has unreliable tasks - you want to be able resubmit the workflow.
+* A workflow has unreliable tasks - you want to be able to resubmit the workflow.
-A **marker file** is a file on that indicates the work has already been done, before doing the work you check to see if the marker has already been done:
+A **marker file** is a file that indicates the work has already been done. Before doing the work, check whether the marker file already exists:
```sh
if [ -e /work/markers/name-of-task ]; then
@@ -21,18 +19,18 @@ fi
echo "working very hard"
touch /work/markers/name-of-task
```
-
+
Choose a name for the file that is unique for the task, e.g. the template name and all the parameters:
```sh
touch /work/markers/$(date +%Y-%m-%d)-echo-{{inputs.parameters.num}}
-```
-
-You need to store the marker files between workflows and this can be achieved using [a PVC](fields.md#persistentvolumeclaim) and [optional input artifact](fields.md#artifact).
+```
+
+You need to store the marker files between workflows and this can be achieved using [a PVC](fields.md#persistentvolumeclaim) and [optional input artifact](fields.md#artifact).
[This complete work avoidance example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/work-avoidance.yaml) has the following:
* A PVC to store the markers on.
* A `load-markers` step that loads the marker files from artifact storage.
* Multiple `echo` tasks that avoid work using marker files.
-* A `save-markers` exit handler to save the marker files, even if they are not needed.
+* A `save-markers` exit handler to save the marker files, even if they are not needed.
diff --git a/docs/workflow-archive.md b/docs/workflow-archive.md
index 3e295295f8b2..e76fb1a95c2f 100644
--- a/docs/workflow-archive.md
+++ b/docs/workflow-archive.md
@@ -1,10 +1,8 @@
# Workflow Archive
-
-
> v2.5 and after
-For many uses, you may wish to keep workflows for a long time. Argo can save completed workflows to an SQL database.
+For many uses, you may wish to keep workflows for a long time. Argo can save completed workflows to an SQL database.
To enable this feature, configure a Postgres or MySQL (>= 5.7.8) database under `persistence` in [your configuration](workflow-controller-configmap.yaml) and set `archive: true`.
@@ -12,15 +10,16 @@ Be aware that this feature will only archive the statuses of the workflows (whic
However, the logs of each pod will NOT be archived. If you need to access the logs of the pods, you need to setup [an artifact repository](artifact-repository-ref.md) thanks to [this doc](configure-artifact-repository.md).
-In addition the table specified in the configmap above, the following tables are created when enabling archiving:
+In addition to the table specified in the config map above, the following tables are created when enabling archiving:
-* argo_archived_workflows
-* argo_archived_workflows_labels
-* schema_history
+* `argo_archived_workflows`
+* `argo_archived_workflows_labels`
+* `schema_history`
The database migration will only occur successfully if none of the tables exist. If a partial set of the tables exist, the database migration may fail and the Argo workflow-controller pod may fail to start. If this occurs delete all of the tables and try restarting the deployment.
## Required database permissions
### Postgres
+
The database user/role needs to have `CREATE` and `USAGE` permissions on the `public` schema of the database so that the necessary table can be generated during the migration.
diff --git a/docs/workflow-concepts.md b/docs/workflow-concepts.md
index ee7839a6424c..e7724a82c829 100644
--- a/docs/workflow-concepts.md
+++ b/docs/workflow-concepts.md
@@ -45,9 +45,10 @@ These templates _define_ work to be done, usually in a Container.
##### [Container](fields.md#container)
-Perhaps the most common template type, it will schedule a Container. The spec of the template is the same as the [K8s container spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#container-v1-core), so you can define a container here the same way you do anywhere else in K8s.
-
+Perhaps the most common template type, it will schedule a Container. The spec of the template is the same as the [Kubernetes container spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#container-v1-core), so you can define a container here the same way you do anywhere else in Kubernetes.
+
Example:
+
```yaml
- name: whalesay
container:
@@ -59,9 +60,10 @@ Example:
##### [Script](fields.md#scripttemplate)
A convenience wrapper around a `container`. The spec is the same as a container, but adds the `source:` field which allows you to define a script in-place.
-The script will be saved into a file and executed for you. The result of the script is automatically exported into an [Argo variable](./variables.md) either `{{tasks..outputs.result}}` or `{{steps..outputs.result}}`, depending how it was called.
-
+The script will be saved into a file and executed for you. The result of the script is automatically exported into an [Argo variable](./variables.md), either `{{tasks..outputs.result}}` or `{{steps..outputs.result}}`, depending on how it was called.
+
Example:
+
```yaml
- name: gen-random-int
script:
@@ -76,8 +78,9 @@ Example:
##### [Resource](fields.md#resourcetemplate)
Performs operations on cluster Resources directly. It can be used to get, create, apply, delete, replace, or patch resources on your cluster.
-
+
This example creates a `ConfigMap` resource on the cluster:
+
```yaml
- name: k8s-owner-reference
resource:
@@ -94,8 +97,9 @@ This example creates a `ConfigMap` resource on the cluster:
##### [Suspend](fields.md#suspendtemplate)
A suspend template will suspend execution, either for a duration or until it is resumed manually. Suspend templates can be resumed from the CLI (with `argo resume`), the API endpoint, or the UI.
-
+
Example:
+
```yaml
- name: delay
suspend:
@@ -109,8 +113,9 @@ These templates are used to invoke/call other templates and provide execution co
##### [Steps](fields.md#workflowstep)
A steps template allows you to define your tasks in a series of steps. The structure of the template is a "list of lists". Outer lists will run sequentially and inner lists will run in parallel. If you want to run inner lists one by one, use the [Synchronization](fields.md#synchronization) feature. You can set a wide array of options to control execution, such as [`when:` clauses to conditionally execute a step](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/coinflip.yaml).
-
+
In this example `step1` runs first. Once it is completed, `step2a` and `step2b` will run in parallel:
+
```yaml
- name: hello-hello-hello
steps:
@@ -125,8 +130,9 @@ In this example `step1` runs first. Once it is completed, `step2a` and `step2b`
##### [DAG](fields.md#dagtemplate)
A dag template allows you to define your tasks as a graph of dependencies. In a DAG, you list all your tasks and set which other tasks must complete before a particular task can begin. Tasks without any dependencies will be run immediately.
-
+
In this example `A` runs first. Once it is completed, `B` and `C` will run in parallel and once they both complete, `D` will run:
+
```yaml
- name: diamond
dag:
diff --git a/docs/workflow-controller-configmap.md b/docs/workflow-controller-configmap.md
index ceb278190cd5..b1d9aef6ab43 100644
--- a/docs/workflow-controller-configmap.md
+++ b/docs/workflow-controller-configmap.md
@@ -1,87 +1,11 @@
-# Workflow Controller Configmap
+# Workflow Controller Config Map
## Introduction
-The Workflow Controller Configmap is used to set controller-wide settings.
+The Workflow Controller Config Map is used to set controller-wide settings.
For a detailed example, please see [`workflow-controller-configmap.yaml`](./workflow-controller-configmap.yaml).
-## Setting the Configmap
-
-The configmap should be saved as a K8s Configmap on the cluster in the same namespace as the `workflow-controller`.
-It should then be referenced by the `workflow-controller` and `argo-server` as a command argument:
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: workflow-controller
-spec:
- selector:
- matchLabels:
- app: workflow-controller
- template:
- metadata:
- labels:
- app: workflow-controller
- spec:
- containers:
- - args:
- - --configmap
- - workflow-controller-configmap # Set configmap name here
- - --executor-image
- - argoproj/argoexec:latest
- - --namespaced
- command:
- - workflow-controller
- image: argoproj/workflow-controller:latest
- name: workflow-controller
- serviceAccountName: argo
- nodeSelector:
- kubernetes.io/os: linux
-```
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: argo-server
-spec:
- selector:
- matchLabels:
- app: argo-server
- template:
- metadata:
- labels:
- app: argo-server
- spec:
- containers:
- - args:
- - server
- - --configmap
- - workflow-controller-configmap # Set configmap name here
- image: argoproj/argocli:latest
- name: argo-server
- ports:
- - containerPort: 2746
- name: web
- readinessProbe:
- httpGet:
- path: /
- port: 2746
- scheme: HTTP
- initialDelaySeconds: 10
- periodSeconds: 20
- securityContext:
- capabilities:
- drop:
- - ALL
- volumeMounts:
- - mountPath: /tmp
- name: tmp
- nodeSelector:
- kubernetes.io/os: linux
-```
-
## Alternate Structure
In all versions, the configuration may be under a `config: |` key:
diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml
index 044d4d6a05e6..c9567585cac3 100644
--- a/docs/workflow-controller-configmap.yaml
+++ b/docs/workflow-controller-configmap.yaml
@@ -146,6 +146,7 @@ data:
# Specifies the container runtime interface to use (default: emissary)
# must be one of: docker, kubelet, k8sapi, pns, emissary
# It has lower precedence than either `--container-runtime-executor` and `containerRuntimeExecutors`.
+ # (removed in v3.4)
containerRuntimeExecutor: emissary
# Specifies the executor to use.
@@ -158,6 +159,7 @@ data:
#
# The list is in order of precedence; the first matching executor is used.
# This has precedence over `containerRuntimeExecutor`.
+ # (removed in v3.4)
containerRuntimeExecutors: |
- name: emissary
selector:
@@ -169,26 +171,24 @@ data:
workflows.argoproj.io/container-runtime-executor: pns
# Specifies the location of docker.sock on the host for docker executor (default: /var/run/docker.sock)
- # (available since Argo v2.4)
+ # (available v2.4-v3.3)
dockerSockPath: /var/someplace/else/docker.sock
- # kubelet port when using kubelet executor (default: 10250)
+ # kubelet port when using kubelet executor (default: 10250). The kubelet executor is deprecated; use emissary instead.
+ # (removed in v3.4)
kubeletPort: 10250
# disable the TLS verification of the kubelet executor (default: false)
+ # (removed in v3.4)
kubeletInsecure: false
# The command/args for each image, needed when the command is not specified and the emissary executor is used.
# https://argoproj.github.io/argo-workflows/workflow-executors/#emissary-emissary
images: |
- argoproj/argosay:v1:
- command: [cowsay]
argoproj/argosay:v2:
- command: [/argosay]
+ cmd: [/argosay]
docker/whalesay:latest:
- command: [cowsay]
- python:alpine3.6:
- command: [python3]
+ cmd: [/bin/bash]
# Defaults for main containers. These can be overridden by the template.
# <= v3.3 only `resources` are supported.
diff --git a/docs/workflow-creator.md b/docs/workflow-creator.md
index bc5809d4ade7..6ad7cd6b4016 100644
--- a/docs/workflow-creator.md
+++ b/docs/workflow-creator.md
@@ -1,10 +1,8 @@
# Workflow Creator
-
-
> v2.9 and after
-If you create your workflow via the CLI or UI, an attempt will be made to label it with the user who created it
+If you create your workflow via the CLI or UI, an attempt will be made to label it with the user who created it
```yaml
apiVersion: argoproj.io/v1alpha1
@@ -15,8 +13,8 @@ metadata:
workflows.argoproj.io/creator: admin
# labels must be DNS formatted, so the "@" is replaces by '.at.'
workflows.argoproj.io/creator-email: admin.at.your.org
-```
+ workflows.argoproj.io/creator-preferred-username: admin-preferred-username
+```
!!! NOTE
Labels only contain `[-_.0-9a-zA-Z]`, so any other characters will be turned into `-`.
-
\ No newline at end of file
diff --git a/docs/workflow-events.md b/docs/workflow-events.md
index d55b8ec3c250..0e906b8487b0 100644
--- a/docs/workflow-events.md
+++ b/docs/workflow-events.md
@@ -1,9 +1,9 @@
# Workflow Events
-
-
> v2.7.2
+⚠️ Do not use Kubernetes events for automation. Events may be lost or rolled up.
+
We emit Kubernetes events on certain events.
Workflow state change:
@@ -20,7 +20,6 @@ Node state change:
* `WorkflowNodeFailed`
* `WorkflowNodeError`
-
The involved object is the workflow in both cases. Additionally, for node state change events, annotations indicate the name and type of the involved node:
```yaml
diff --git a/docs/workflow-executors.md b/docs/workflow-executors.md
index a0d276a3dbfc..96b3ddcd0640 100644
--- a/docs/workflow-executors.md
+++ b/docs/workflow-executors.md
@@ -1,14 +1,14 @@
# Workflow Executors
-A workflow executor is a process that conforms to a specific interface that allows Argo to perform certain actions like monitoring pod logs, collecting artifacts, managing container lifecycles, etc..
+A workflow executor is a process that conforms to a specific interface that allows Argo to perform certain actions like monitoring pod logs, collecting artifacts, managing container life-cycles, etc.
-The executor to be used in your workflows can be changed in [the configmap](./workflow-controller-configmap.yaml) under the `containerRuntimeExecutor` key.
+The executor to be used in your workflows can be changed in [the config map](./workflow-controller-configmap.yaml) under the `containerRuntimeExecutor` key.
## Emissary (emissary)
> v3.1 and after
-**default in >= v3.3**
+Default in >= v3.3.
This is the most fully featured executor.
@@ -27,7 +27,7 @@ This is the most fully featured executor.
* Configuration:
* `command` must be specified for containers.
-You can determine the command and args as follows:
+You can determine an image's entrypoint and command as follows:
```bash
docker image inspect -f '{{.Config.Entrypoint}} {{.Config.Cmd}}' argoproj/argosay:v2
@@ -45,12 +45,11 @@ a [configuration item](workflow-controller-configmap.yaml).
The emissary will exit with code 64 if it fails. This may indicate a bug in the emissary.
+## Docker (docker)
-## Docker (docker)
+⚠️ Deprecated. Removed in v3.4.
-⚠️Deprecated.
-
-**default in <= v3.2**
+Default in <= v3.2.
* Least secure:
* It requires `privileged` access to `docker.sock` of the host to be mounted which. Often rejected by Open Policy Agent (OPA) or your Pod Security Policy (PSP).
@@ -67,6 +66,8 @@ The emissary will exit with code 64 if it fails. This may indicate a bug in the
## Kubelet (kubelet)
+⚠️ Deprecated. Removed in v3.4.
+
* Secure
* No `privileged` access
* Cannot escape the privileges of the pod's service account
@@ -74,15 +75,15 @@ The emissary will exit with code 64 if it fails. This may indicate a bug in the
* Scalable:
* Operations performed against the local Kubelet
* Artifacts:
- * Output artifacts must be saved on volumes (e.g. [emptyDir](empty-dir.md)) and not the base image layer (e.g. `/tmp`)
+ * Output artifacts must be saved on volumes (e.g. [empty-dir](empty-dir.md)) and not the base image layer (e.g. `/tmp`)
* Step/Task result:
* Warnings that normally goes to stderr will get captured in a step or a dag task's `outputs.result`. May require changes if your pipeline is conditioned on `steps/tasks.name.outputs.result`
* Configuration:
* Additional Kubelet configuration maybe needed
-## Kubernetes API (k8sapi)
+## Kubernetes API (`k8sapi`)
-⚠️Deprecated.
+⚠️ Deprecated. Removed in v3.4.
* Reliability:
* Works on GKE Autopilot
@@ -93,18 +94,20 @@ The emissary will exit with code 64 if it fails. This may indicate a bug in the
* Least scalable:
* Log retrieval and container operations performed against the remote Kubernetes API
* Artifacts:
- * Output artifacts must be saved on volumes (e.g. [emptyDir](empty-dir.md)) and not the base image layer (e.g. `/tmp`)
+ * Output artifacts must be saved on volumes (e.g. [empty-dir](empty-dir.md)) and not the base image layer (e.g. `/tmp`)
* Step/Task result:
* Warnings that normally goes to stderr will get captured in a step or a dag task's `outputs.result`. May require changes if your pipeline is conditioned on `steps/tasks.name.outputs.result`
* Configuration:
* No additional configuration needed.
-## Process Namespace Sharing (pns)
+## Process Namespace Sharing (`pns`)
+
+⚠️ Deprecated. Removed in v3.4.
* More secure:
* No `privileged` access
* cannot escape the privileges of the pod's service account
- * Can [`runAsNonRoot`](workflow-pod-security-context.md), if you use volumes (e.g. [emptyDir](empty-dir.md)) for your output artifacts
+ * Can [`runAsNonRoot`](workflow-pod-security-context.md), if you use volumes (e.g. [empty-dir](empty-dir.md)) for your output artifacts
* Processes are visible to other containers in the pod. This includes all information visible in /proc, such as passwords that were passed as arguments or environment variables. These are protected only by regular Unix permissions.
* Scalable:
* Most operations use local `procfs`.
@@ -118,4 +121,4 @@ The emissary will exit with code 64 if it fails. This may indicate a bug in the
* Process will no longer run with PID 1
* [Doesn't work for Windows containers](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#v1-pod).
-[https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/)
+[Learn more](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/)
diff --git a/docs/workflow-inputs.md b/docs/workflow-inputs.md
index 1152badd938b..989d94885570 100644
--- a/docs/workflow-inputs.md
+++ b/docs/workflow-inputs.md
@@ -1,7 +1,5 @@
# Workflow Inputs
-
-
## Introduction
`Workflows` and `template`s operate on a set of defined parameters and arguments that are supplied to the running container. The precise details of how to manage the inputs can be confusing; this article attempts to clarify concepts and provide simple working examples to illustrate the various configuration options.
@@ -10,26 +8,29 @@ The examples below are limited to `DAGTemplate`s and mainly focused on `paramete
### Parameter Inputs
-First, some clarification of terms is needed. For a glossary reference, see [Argo Core Concepts](https://argoproj.github.io/argo-workflows/core-concepts/).
+First, some clarification of terms is needed. For a glossary reference, see [Argo Core Concepts](workflow-concepts.md).
A `workflow` provides `arguments`, which are passed in to the entry point template. A `template` defines `inputs` which are then provided by template callers (such as `steps`, `dag`, or even a `workflow`). The structure of both is identical.
For example, in a `Workflow`, one parameter would look like this:
-```
+
+```yaml
arguments:
parameters:
- name: workflow-param-1
```
And in a `template`:
-```
+
+```yaml
inputs:
parameters:
- name: template-param-1
```
Inputs to `DAGTemplate`s use the `arguments` format:
-```
+
+```yaml
dag:
tasks:
- name: step-A
@@ -41,7 +42,8 @@ dag:
```
Previous examples in context:
-```
+
+```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
@@ -76,8 +78,10 @@ spec:
To run this example: `argo submit -n argo example.yaml -p 'workflow-param-1="abcd"' --watch`
### Using Previous Step Outputs As Inputs
+
In `DAGTemplate`s, it is common to want to take the output of one step and send it as the input to another step. However, there is a difference in how this works for artifacts vs parameters. Suppose our `step-template-A` defines some outputs:
-```
+
+```yaml
outputs:
parameters:
- name: output-param-1
@@ -89,7 +93,8 @@ outputs:
```
In my `DAGTemplate`, I can send these outputs to another template like this:
-```
+
+```yaml
dag:
tasks:
- name: step-A
diff --git a/docs/workflow-notifications.md b/docs/workflow-notifications.md
index 7ae446fd23f0..c8ea68413e09 100644
--- a/docs/workflow-notifications.md
+++ b/docs/workflow-notifications.md
@@ -10,4 +10,4 @@ You have options:
1. For individual workflows, can add an exit handler to your workflow, [for example](https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/exit-handlers.yaml).
1. If you want the same for every workflow, you can add an exit handler to [the default workflow spec](default-workflow-specs.md).
-1. Use a service (e.g. [Heptio Labs EventRouter](https://github.com/heptiolabs/eventrouter)) to the [Workflow events](workflow-events.md) we emit.
\ No newline at end of file
+1. Use a service (e.g. [Heptio Labs EventRouter](https://github.com/heptiolabs/eventrouter)) to react to the [Workflow events](workflow-events.md) we emit.
diff --git a/docs/workflow-of-workflows.md b/docs/workflow-of-workflows.md
index 4968df817fa2..075a896ed063 100644
--- a/docs/workflow-of-workflows.md
+++ b/docs/workflow-of-workflows.md
@@ -3,10 +3,13 @@
> v2.9 and after
## Introduction
-The Workflow of Workflows pattern involves a parent workflow triggering one or more child workflows, managing them, and acting their results.
-
+
+The Workflow of Workflows pattern involves a parent workflow triggering one or more child workflows, managing them, and acting on their results.
+
## Examples
+
You can use `workflowTemplateRef` to trigger a workflow inline.
+
1. Define your workflow as a `workflowtemplate`.
```yaml
@@ -30,11 +33,13 @@ spec:
command: [cowsay]
args: ["{{inputs.parameters.message}}"]
```
-2. Create the `Workflowtemplate` in cluster using `argo template create `
-3. Define the workflow of workflows.
+
+1. Create the `WorkflowTemplate` in the cluster using `argo template create`.
+2. Define the workflow of workflows.
+
```yaml
# This template demonstrates a workflow of workflows.
-# Workflow triggers one or more workflow and manage it.
+# Workflow triggers one or more workflows and manages them.
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
diff --git a/docs/workflow-pod-security-context.md b/docs/workflow-pod-security-context.md
index 92413fcf65e7..1247f0304bbb 100644
--- a/docs/workflow-pod-security-context.md
+++ b/docs/workflow-pod-security-context.md
@@ -1,6 +1,6 @@
# Workflow Pod Security Context
-By default, a workflow pods run as root. The Docker executor even requires `privileged: true`.
+By default, all workflow pods run as root. The Docker executor even requires `privileged: true`.
For other [workflow executors](workflow-executors.md), you can run your workflow pods more securely by configuring the [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for your workflow pod.
diff --git a/docs/workflow-rbac.md b/docs/workflow-rbac.md
index 672c24d15146..0b2d974982cb 100644
--- a/docs/workflow-rbac.md
+++ b/docs/workflow-rbac.md
@@ -21,7 +21,7 @@ rules:
- apiGroups:
- argoproj.io
resources:
- - workflowtaskresult
+ - workflowtaskresults
verbs:
- create
- patch
@@ -44,7 +44,7 @@ rules:
- patch
```
-Warning: For many organisations, it may not be acceptable to give a workflow the `pod patch` permission, see [#3961](https://github.com/argoproj/argo-workflows/issues/3961)
+Warning: For many organizations, it may not be acceptable to give a workflow the `pod patch` permission, see [#3961](https://github.com/argoproj/argo-workflows/issues/3961)
If you are not using the emissary, you'll need additional permissions.
See [executor](https://github.com/argoproj/argo-workflows/tree/master/manifests/quick-start/base/executor) for suitable
diff --git a/docs/workflow-restrictions.md b/docs/workflow-restrictions.md
index dfce2c302ba6..7b7c03a5aa7d 100644
--- a/docs/workflow-restrictions.md
+++ b/docs/workflow-restrictions.md
@@ -1,17 +1,15 @@
# Workflow Restrictions
-
-
> v2.9 and after
## Introduction
-As the administrator of the controller, you may want to limit which types of Workflows your users can run. Setting workflow restrictions allows you to ensure that Workflows comply with certain requirements.
+As the administrator of the controller, you may want to limit which types of Workflows your users can run. Setting workflow restrictions allows you to ensure that Workflows comply with certain requirements.
## Available Restrictions
-* `templateReferencing: Strict`: Only Workflows using "workflowTemplateRef" will be processed. This allows the administrator of the controller to set a "library" of templates that may be run by its operator, limiting arbitrary Workflow execution.
-* `templateReferencing: Secure`: Only Workflows using "workflowTemplateRef" will be processed and the controller will enforce that the WorkflowTemplate that is referenced hasn't changed between operations. If you want to make sure the operator of the Workflow cannot run an arbitrary Workflow, use this option.
+* `templateReferencing: Strict`: Only Workflows using `workflowTemplateRef` will be processed. This allows the administrator of the controller to set a "library" of templates that may be run by its operator, limiting arbitrary Workflow execution.
+* `templateReferencing: Secure`: Only Workflows using `workflowTemplateRef` will be processed and the controller will enforce that the workflow template that is referenced hasn't changed between operations. If you want to make sure the operator of the Workflow cannot run an arbitrary Workflow, use this option.
## Setting Workflow Restrictions
diff --git a/docs/workflow-submitting-workflow.md b/docs/workflow-submitting-workflow.md
index 7f3f27365565..cec0bb731501 100644
--- a/docs/workflow-submitting-workflow.md
+++ b/docs/workflow-submitting-workflow.md
@@ -1,7 +1,5 @@
# One Workflow Submitting Another
-
-
> v2.8 and after
If you want one workflow to create another, you can do this using `curl`. You'll need an [access token](access-token.md). Typically the best way is to submit from a workflow template:
@@ -29,10 +27,3 @@ spec:
-H "Authorization: Bearer eyJhbGci..." \
-d '{"resourceKind": "WorkflowTemplate", "resourceName": "wait", "submitOptions": {"labels": "workflows.argoproj.io/workflow-template=wait"}}'
```
-
-See also:
-
-* [access token](access-token.md)
-* [resuming a workflow via automation](resuming-workflow-via-automation.md)
-* [submitting a workflow via automation](submit-workflow-via-automation.md)
-* [async pattern](async-pattern.md)
diff --git a/docs/workflow-templates.md b/docs/workflow-templates.md
index bfcc41003aab..8dc4d6822132 100644
--- a/docs/workflow-templates.md
+++ b/docs/workflow-templates.md
@@ -1,7 +1,5 @@
# Workflow Templates
-
-
> v2.4 and after
## Introduction
@@ -19,8 +17,9 @@ in the past. However, a quick description should clarify each and their differen
`Workflow`, you must define at least one (but usually more than one) `template` to run. This `template` can be of type
`container`, `script`, `dag`, `steps`, `resource`, or `suspend` and can be referenced by an `entrypoint` or by other
`dag`, and `step` templates.
-
+
Here is an example of a `Workflow` with two `templates`:
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -130,8 +129,9 @@ spec:
When working with parameters in a `WorkflowTemplate`, please note the following:
-1. When working with global parameters, you can instantiate your global variables in your `Workflow`
+- When working with global parameters, you can instantiate your global variables in your `Workflow`
and then directly reference them in your `WorkflowTemplate`. Below is a working example:
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: WorkflowTemplate
@@ -165,8 +165,10 @@ spec:
name: hello-world-template-global-arg
template: hello-world
```
-2. When working with local parameters, the values of local parameters must be supplied at the template definition inside
-the `WorkflowTemplate`. Below is a working example:
+
+- When working with local parameters, the values of local parameters must be supplied at the template definition inside
+the `WorkflowTemplate`. Below is a working example:
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: WorkflowTemplate
@@ -205,6 +207,7 @@ You can reference `templates` from another `WorkflowTemplates` (see the [differe
Just as how you reference other `templates` within the same `Workflow`, you should do so from a `steps` or `dag` template.
Here is an example from a `steps` template:
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -226,6 +229,7 @@ spec:
```
You can also do so similarly with a `dag` template:
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -252,6 +256,7 @@ This includes both using `template` and `templateRef`.
This behavior is deprecated, no longer supported, and will be removed in a future version.
Here is an example of a **deprecated** reference that **should not be used**:
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -278,9 +283,12 @@ to pass in "live" arguments and reference other templates (those other templates
This behavior has been problematic and dangerous. It causes confusion and has design inconsistencies.
> 2.9 and after
-#### Create `Workflow` from `WorkflowTemplate` Spec
-You can create `Workflow` from `WorkflowTemplate` spec using `workflowTemplateRef`. If you pass the arguments to created `Workflow`, it will be merged with WorkflowTemplate arguments.
+
+### Create `Workflow` from `WorkflowTemplate` Spec
+
+You can create a `Workflow` from a `WorkflowTemplate` spec using `workflowTemplateRef`. If you pass arguments to the created `Workflow`, they will be merged with the workflow template's arguments.
Here is an example for referring `WorkflowTemplate` as Workflow with passing `entrypoint` and `Workflow Arguments` to `WorkflowTemplate`
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -297,6 +305,7 @@ spec:
```
Here is an example of a referring `WorkflowTemplate` as Workflow and using `WorkflowTemplates`'s `entrypoint` and `Workflow Arguments`
+
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
@@ -314,13 +323,13 @@ spec:
You can create some example templates as follows:
-```
+```bash
argo template create https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/workflow-template/templates.yaml
```
Then submit a workflow using one of those templates:
-```
+```bash
argo submit https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/workflow-template/hello-world.yaml
```
@@ -328,18 +337,16 @@ argo submit https://raw.githubusercontent.com/argoproj/argo-workflows/master/exa
Then submit a `WorkflowTemplate` as a `Workflow`:
-```sh
+```bash
argo submit --from workflowtemplate/workflow-template-submittable
```
If you need to submit a `WorkflowTemplate` as a `Workflow` with parameters:
-```sh
+```bash
argo submit --from workflowtemplate/workflow-template-submittable -p param1=value1
```
-
-
### `kubectl`
Using `kubectl apply -f` and `kubectl get wftmpl`
diff --git a/errors/errors.go b/errors/errors.go
index 2b146e8c2743..957f4b1319fc 100644
--- a/errors/errors.go
+++ b/errors/errors.go
@@ -4,6 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "net/http"
)
// Externally visible error codes
@@ -22,6 +23,7 @@ const (
type ArgoError interface {
Error() string
Code() string
+ HTTPCode() int
JSON() []byte
}
@@ -138,6 +140,25 @@ func (e argoerr) JSON() []byte {
return j
}
+// HTTPCode returns the HTTP status code corresponding to the error's Argo error code.
+func (e argoerr) HTTPCode() int {
+ switch e.Code() {
+ case CodeUnauthorized:
+ return http.StatusUnauthorized
+ case CodeForbidden:
+ return http.StatusForbidden
+ case CodeNotFound:
+ return http.StatusNotFound
+ case CodeBadRequest:
+ return http.StatusBadRequest
+ case CodeNotImplemented:
+ return http.StatusNotImplemented
+ case CodeTimeout, CodeInternal:
+ return http.StatusInternalServerError
+ default:
+ return http.StatusInternalServerError
+ }
+}
+
// IsCode is a helper to determine if the error is of a specific code
func IsCode(code string, err error) bool {
if argoErr, ok := err.(argoerr); ok {
diff --git a/examples/README.md b/examples/README.md
index 6a8dbaf5032b..8cb416a8781b 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,1563 +1,3 @@
# Documentation by Example
-## Welcome!
-
-Argo is an open source project that provides container-native workflows for Kubernetes. Each step in an Argo workflow is defined as a container.
-
-Argo is implemented as a Kubernetes CRD (Custom Resource Definition). As a result, Argo workflows can be managed using `kubectl` and natively integrates with other Kubernetes services such as volumes, secrets, and RBAC. The new Argo software is light-weight and installs in under a minute, and provides complete workflow features including parameter substitution, artifacts, fixtures, loops and recursive workflows.
-
-Many of the Argo examples used in this walkthrough are available in the [`/examples` directory](https://github.com/argoproj/argo-workflows/tree/master/examples) on GitHub. If you like this project, please give us a star!
-
-For a complete description of the Argo workflow spec, please refer to [the spec documentation](https://argoproj.github.io/argo-workflows/fields/#workflowspec).
-
-## Table of Contents
-
-1. [Argo CLI](#argo-cli)
-1. [Hello World!](#hello-world)
-1. [Parameters](#parameters)
-1. [Steps](#steps)
-1. [DAG](#dag)
-1. [Artifacts](#artifacts)
-1. [The Structure of Workflow Specs](#the-structure-of-workflow-specs)
-1. [Secrets](#secrets)
-1. [Scripts & Results](#scripts--results)
-1. [Output Parameters](#output-parameters)
-1. [Loops](#loops)
-1. [Conditionals](#conditionals)
-1. [Retrying Failed or Errored Steps](#retrying-failed-or-errored-steps)
-1. [Recursion](#recursion)
-1. [Exit Handlers](#exit-handlers)
-1. [Timeouts](#timeouts)
-1. [Volumes](#volumes)
-1. [Suspending](#suspending)
-1. [Daemon Containers](#daemon-containers)
-1. [Sidecars](#sidecars)
-1. [Hardwired Artifacts](#hardwired-artifacts)
-1. [Kubernetes Resources](#kubernetes-resources)
-1. [Docker-in-Docker Using Sidecars](#docker-in-docker-using-sidecars)
-1. [Custom Template Variable Reference](#custom-template-variable-reference)
-1. [Continuous Integration Example](#continuous-integration-example)
-
-## Argo CLI
-
-In case you want to follow along with this walkthrough, here's a quick overview of the most useful argo command line interface (CLI) commands.
-
-```sh
-argo submit hello-world.yaml # submit a workflow spec to Kubernetes
-argo list # list current workflows
-argo get hello-world-xxx # get info about a specific workflow
-argo logs hello-world-xxx # print the logs from a workflow
-argo delete hello-world-xxx # delete workflow
-```
-
-You can also run workflow specs directly using `kubectl` but the Argo CLI provides syntax checking, nicer output, and requires less typing.
-
-```sh
-kubectl create -f hello-world.yaml
-kubectl get wf
-kubectl get wf hello-world-xxx
-kubectl get po --selector=workflows.argoproj.io/workflow=hello-world-xxx --show-all # similar to argo
-kubectl logs hello-world-xxx-yyy -c main
-kubectl delete wf hello-world-xxx
-```
-
-## Hello World!
-
-Let's start by creating a very simple workflow template to echo "hello world" using the docker/whalesay container image from DockerHub.
-
-You can run this directly from your shell with a simple docker command:
-
-```sh
-$ docker run docker/whalesay cowsay "hello world"
- _____________
-< hello world >
- -------------
- \
- \
- \
- ## .
- ## ## ## ==
- ## ## ## ## ===
- /""""""""""""""""___/ ===
- ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~
- \______ o __/
- \ \ __/
- \____\______/
-
-
-Hello from Docker!
-This message shows that your installation appears to be working correctly.
-```
-
-Below, we run the same container on a Kubernetes cluster using an Argo workflow template.
-Be sure to read the comments as they provide useful explanations.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow # new type of k8s spec
-metadata:
- generateName: hello-world- # name of the workflow spec
-spec:
- entrypoint: whalesay # invoke the whalesay template
- templates:
- - name: whalesay # name of the template
- container:
- image: docker/whalesay
- command: [cowsay]
- args: ["hello world"]
- resources: # limit the resources
- limits:
- memory: 32Mi
- cpu: 100m
-```
-
-Argo adds a new `kind` of Kubernetes spec called a `Workflow`. The above spec contains a single `template` called `whalesay` which runs the `docker/whalesay` container and invokes `cowsay "hello world"`. The `whalesay` template is the `entrypoint` for the spec. The entrypoint specifies the initial template that should be invoked when the workflow spec is executed by Kubernetes. Being able to specify the entrypoint is more useful when there is more than one template defined in the Kubernetes workflow spec. :-)
-
-## Parameters
-
-Let's look at a slightly more complex workflow spec with parameters.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: hello-world-parameters-
-spec:
- # invoke the whalesay template with
- # "hello world" as the argument
- # to the message parameter
- entrypoint: whalesay
- arguments:
- parameters:
- - name: message
- value: hello world
-
- templates:
- - name: whalesay
- inputs:
- parameters:
- - name: message # parameter declaration
- container:
- # run cowsay with that message input parameter as args
- image: docker/whalesay
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
-```
-
-This time, the `whalesay` template takes an input parameter named `message` that is passed as the `args` to the `cowsay` command. In order to reference parameters (e.g., ``"{{inputs.parameters.message}}"``), the parameters must be enclosed in double quotes to escape the curly braces in YAML.
-
-The argo CLI provides a convenient way to override parameters used to invoke the entrypoint. For example, the following command would bind the `message` parameter to "goodbye world" instead of the default "hello world".
-
-```sh
-argo submit arguments-parameters.yaml -p message="goodbye world"
-```
-
-When multiple parameters need to be overridden, the Argo CLI can load parameter files in YAML or JSON format. Here is an example of such a parameter file:
-
-```yaml
-message: goodbye world
-```
-
-To submit the workflow with this parameter file, use the following command:
-
-```sh
-argo submit arguments-parameters.yaml --parameter-file params.yaml
-```
-
-Command-line parameters can also be used to override the default entrypoint and invoke any template in the workflow spec. For example, if you add a new version of the `whalesay` template called `whalesay-caps` but you don't want to change the default entrypoint, you can invoke this from the command line as follows:
-
-```sh
-argo submit arguments-parameters.yaml --entrypoint whalesay-caps
-```
-
-By using a combination of the `--entrypoint` and `-p` parameters, you can call any template in the workflow spec with any parameter that you like.
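-
-For example, combining the two flags from above (this assumes the spec defines a `whalesay-caps` template that takes a `message` parameter):
-
-```sh
-argo submit arguments-parameters.yaml --entrypoint whalesay-caps -p message="goodbye world"
-```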
-
-The values set in the `spec.arguments.parameters` are globally scoped and can be accessed via `{{workflow.parameters.parameter_name}}`. This can be useful to pass information to multiple steps in a workflow. For example, if you wanted to run your workflows with different logging levels that are set in the environment of each container, you could have a YAML file similar to this one:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: global-parameters-
-spec:
- entrypoint: A
- arguments:
- parameters:
- - name: log-level
- value: INFO
-
- templates:
- - name: A
- container:
- image: containerA
- env:
- - name: LOG_LEVEL
- value: "{{workflow.parameters.log-level}}"
- command: [runA]
- - name: B
- container:
- image: containerB
- env:
- - name: LOG_LEVEL
- value: "{{workflow.parameters.log-level}}"
- command: [runB]
-```
-
-In this workflow, both steps `A` and `B` would have the same log level, `INFO`, which can easily be changed between workflow submissions using the `-p` flag.
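-
-For instance, assuming the spec above is saved as `global-parameters.yaml`, every step's log level can be switched at submission time:
-
-```sh
-argo submit global-parameters.yaml -p log-level=DEBUG
-```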
-
-## Steps
-
-In this example, we'll see how to create multi-step workflows, how to define more than one template in a workflow spec, and how to create nested workflows. Be sure to read the comments as they provide useful explanations.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: steps-
-spec:
- entrypoint: hello-hello-hello
-
- # This spec contains two templates: hello-hello-hello and whalesay
- templates:
- - name: hello-hello-hello
- # Instead of just running a container
- # This template has a sequence of steps
- steps:
- - - name: hello1 # hello1 is run before the following steps
- template: whalesay
- arguments:
- parameters:
- - name: message
- value: "hello1"
- - - name: hello2a # double dash => run after previous step
- template: whalesay
- arguments:
- parameters:
- - name: message
- value: "hello2a"
- - name: hello2b # single dash => run in parallel with previous step
- template: whalesay
- arguments:
- parameters:
- - name: message
- value: "hello2b"
-
- # This is the same template as from the previous example
- - name: whalesay
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
-```
-
-The above workflow spec prints three different flavors of "hello". The `hello-hello-hello` template consists of three `steps`. The first step named `hello1` will be run in sequence whereas the next two steps named `hello2a` and `hello2b` will be run in parallel with each other. Using the argo CLI command, we can graphically display the execution history of this workflow spec, which shows that the steps named `hello2a` and `hello2b` ran in parallel with each other.
-
-```sh
-STEP TEMPLATE PODNAME DURATION MESSAGE
- ✔ steps-z2zdn hello-hello-hello
- ├───✔ hello1 whalesay steps-z2zdn-27420706 2s
- └─┬─✔ hello2a whalesay steps-z2zdn-2006760091 3s
- └─✔ hello2b whalesay steps-z2zdn-2023537710 3s
-```
-
-## DAG
-
-As an alternative to specifying sequences of steps, you can define the workflow as a directed-acyclic graph (DAG) by specifying the dependencies of each task. This can be simpler to maintain for complex workflows and allows for maximum parallelism when running tasks.
-
-In the following workflow, step `A` runs first, as it has no dependencies. Once `A` has finished, steps `B` and `C` run in parallel. Finally, once `B` and `C` have completed, step `D` can run.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: dag-diamond-
-spec:
- entrypoint: diamond
- templates:
- - name: echo
- inputs:
- parameters:
- - name: message
- container:
- image: alpine:3.7
- command: [echo, "{{inputs.parameters.message}}"]
- - name: diamond
- dag:
- tasks:
- - name: A
- template: echo
- arguments:
- parameters: [{name: message, value: A}]
- - name: B
- dependencies: [A]
- template: echo
- arguments:
- parameters: [{name: message, value: B}]
- - name: C
- dependencies: [A]
- template: echo
- arguments:
- parameters: [{name: message, value: C}]
- - name: D
- dependencies: [B, C]
- template: echo
- arguments:
- parameters: [{name: message, value: D}]
-```
-
-The dependency graph may have [multiple roots](./dag-multiroot.yaml). The templates called from a DAG or steps template can themselves be DAG or steps templates. This can allow for complex workflows to be split into manageable pieces.
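-
-As a rough sketch (the template names here are illustrative), a DAG task can invoke a steps template, which in turn invokes a leaf container template:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
-  generateName: nested-dag-
-spec:
-  entrypoint: outer-dag
-  templates:
-  - name: outer-dag
-    dag:
-      tasks:
-      - name: inner
-        template: inner-steps
-
-  - name: inner-steps
-    steps:
-    - - name: hello
-        template: echo
-        arguments:
-          parameters: [{name: message, value: "hello from a nested template"}]
-
-  - name: echo
-    inputs:
-      parameters:
-      - name: message
-    container:
-      image: alpine:3.7
-      command: [echo, "{{inputs.parameters.message}}"]
-```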
-
-The DAG logic has a built-in `fail fast` feature: as soon as it detects that one of the DAG nodes has failed, it stops scheduling new steps, then waits until all running nodes have completed before failing the DAG itself.
-The [FailFast](./dag-disable-failFast.yaml) flag defaults to `true`. If set to `false`, all branches of the DAG will run to completion (whether they succeed or fail), regardless of failures in other branches. More information and an example of this feature can be found in [this issue](https://github.com/argoproj/argo-workflows/issues/1442).
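-
-A minimal sketch of disabling fail-fast on a DAG (the template names and the intentionally failing command are illustrative):
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
-  generateName: dag-no-failfast-
-spec:
-  entrypoint: branches
-  templates:
-  - name: branches
-    dag:
-      failFast: false # run every branch to completion, even after a failure
-      tasks:
-      - name: ok
-        template: echo
-        arguments:
-          parameters: [{name: message, value: "this branch succeeds"}]
-      - name: broken
-        template: intentional-fail
-
-  - name: echo
-    inputs:
-      parameters:
-      - name: message
-    container:
-      image: alpine:3.7
-      command: [echo, "{{inputs.parameters.message}}"]
-
-  - name: intentional-fail
-    container:
-      image: alpine:3.7
-      command: [sh, -c]
-      args: ["echo intentional failure; exit 1"]
-```
-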
-## Artifacts
-
-**Note:**
-You will need to configure an artifact repository to run this example.
-See [configuring an artifact repository](https://argoproj.github.io/argo-workflows/configure-artifact-repository/) for instructions.
-
-When running workflows, it is very common to have steps that generate or consume artifacts. Often, the output artifacts of one step may be used as input artifacts to a subsequent step.
-
-The workflow spec below consists of two steps that run in sequence. The first step, named `generate-artifact`, uses the `whalesay` template to generate an artifact, which is then consumed by the second step, named `print-message`.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: artifact-passing-
-spec:
- entrypoint: artifact-example
- templates:
- - name: artifact-example
- steps:
- - - name: generate-artifact
- template: whalesay
- - - name: consume-artifact
- template: print-message
- arguments:
- artifacts:
- # bind message to the hello-art artifact
- # generated by the generate-artifact step
- - name: message
- from: "{{steps.generate-artifact.outputs.artifacts.hello-art}}"
-
- - name: whalesay
- container:
- image: docker/whalesay:latest
- command: [sh, -c]
- args: ["cowsay hello world | tee /tmp/hello_world.txt"]
- outputs:
- artifacts:
- # generate hello-art artifact from /tmp/hello_world.txt
- # artifacts can be directories as well as files
- - name: hello-art
- path: /tmp/hello_world.txt
-
- - name: print-message
- inputs:
- artifacts:
- # unpack the message input artifact
- # and put it at /tmp/message
- - name: message
- path: /tmp/message
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["cat /tmp/message"]
-```
-
-The `whalesay` template uses the `cowsay` command to generate a file named `/tmp/hello_world.txt`. It then `outputs` this file as an artifact named `hello-art`. In general, the artifact's `path` may be a directory rather than just a file. The `print-message` template takes an input artifact named `message`, unpacks it at the `path` named `/tmp/message`, and then prints the contents of `/tmp/message` using the `cat` command.
-The `artifact-example` template passes the `hello-art` artifact generated as an output of the `generate-artifact` step as the `message` input artifact to the `print-message` step. DAG templates use the tasks prefix to refer to another task, for example `{{tasks.generate-artifact.outputs.artifacts.hello-art}}`.
-
-Artifacts are packaged as tarballs and gzipped by default. You may customize this behavior by specifying an archive strategy using the `archive` field. For example:
-
-```yaml
-<... snipped ...>
- outputs:
- artifacts:
- # default behavior - tar+gzip default compression.
- - name: hello-art-1
- path: /tmp/hello_world.txt
-
- # disable archiving entirely - upload the file / directory as is.
- # this is useful when the container layout matches the desired target repository layout.
- - name: hello-art-2
- path: /tmp/hello_world.txt
- archive:
- none: {}
-
- # customize the compression behavior (disabling it here).
- # this is useful for files with varying compression benefits,
- # e.g. disabling compression for a cached build workspace and large binaries,
- # or increasing compression for "perfect" textual data - like a json/xml export of a large database.
- - name: hello-art-3
- path: /tmp/hello_world.txt
- archive:
- tar:
- # no compression (also accepts the standard gzip 1 to 9 values)
- compressionLevel: 0
-<... snipped ...>
-```
-
-## The Structure of Workflow Specs
-
-We now know enough about the basic components of a workflow spec to review its basic structure:
-
-- Kubernetes header including metadata
-- Spec body
-  - Entrypoint invocation with optional arguments
- - List of template definitions
-
-- For each template definition
- - Name of the template
- - Optionally a list of inputs
- - Optionally a list of outputs
- - Container invocation (leaf template) or a list of steps
- - For each step, a template invocation
-
-To summarize, workflow specs are composed of a set of Argo templates where each template consists of an optional input section, an optional output section and either a container invocation or a list of steps where each step invokes another template.
-
-Note that the container section of the workflow spec will accept the same options as the container section of a pod spec, including but not limited to environment variables, secrets, and volume mounts. The same applies to volume claims and volumes.
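-
-Putting those pieces together, a skeletal spec might look like this (all names are placeholders):
-
-```yaml
-apiVersion: argoproj.io/v1alpha1 # Kubernetes header
-kind: Workflow
-metadata:
-  generateName: structure-example- # metadata
-spec:
-  entrypoint: main # entrypoint invocation
-  arguments: # optional arguments
-    parameters:
-    - name: message
-      value: hello
-  templates:
-  - name: main # a steps template: each step invokes another template
-    steps:
-    - - name: say
-        template: leaf
-        arguments:
-          parameters:
-          - name: message
-            value: "{{workflow.parameters.message}}"
-
-  - name: leaf # a leaf template: a container invocation
-    inputs:
-      parameters:
-      - name: message
-    container:
-      image: docker/whalesay
-      command: [cowsay]
-      args: ["{{inputs.parameters.message}}"]
-```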
-
-## Secrets
-
-Argo supports the same secrets syntax and mechanisms as Kubernetes Pod specs, which allows access to secrets as environment variables or volume mounts. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/secret/) for more information.
-
-```yaml
-# To run this example, first create the secret by running:
-# kubectl create secret generic my-secret --from-literal=mypassword=S00perS3cretPa55word
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: secret-example-
-spec:
- entrypoint: whalesay
- # To access secrets as files, add a volume entry in spec.volumes[] and
- # then in the container template spec, add a mount using volumeMounts.
- volumes:
- - name: my-secret-vol
- secret:
- secretName: my-secret # name of an existing k8s secret
- templates:
- - name: whalesay
- container:
- image: alpine:3.7
- command: [sh, -c]
- args: ['
- echo "secret from env: $MYSECRETPASSWORD";
- echo "secret from file: `cat /secret/mountpath/mypassword`"
- ']
- # To access secrets as environment variables, use the k8s valueFrom and
- # secretKeyRef constructs.
- env:
- - name: MYSECRETPASSWORD # name of env var
- valueFrom:
- secretKeyRef:
- name: my-secret # name of an existing k8s secret
- key: mypassword # 'key' subcomponent of the secret
- volumeMounts:
- - name: my-secret-vol # mount file containing secret at /secret/mountpath
- mountPath: "/secret/mountpath"
-```
-
-## Scripts & Results
-
-Often, we just want a template that executes a script specified as a here-script (also known as a `here document`) in the workflow spec. This example shows how to do that:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: scripts-bash-
-spec:
- entrypoint: bash-script-example
- templates:
- - name: bash-script-example
- steps:
- - - name: generate
- template: gen-random-int-bash
- - - name: print
- template: print-message
- arguments:
- parameters:
- - name: message
- value: "{{steps.generate.outputs.result}}" # The result of the here-script
-
- - name: gen-random-int-bash
- script:
- image: debian:9.4
- command: [bash]
- source: | # Contents of the here-script
- cat /dev/urandom | od -N2 -An -i | awk -v f=1 -v r=100 '{printf "%i\n", f + r * $1 / 65536}'
-
- - name: gen-random-int-python
- script:
- image: python:alpine3.6
- command: [python]
- source: |
- import random
- i = random.randint(1, 100)
- print(i)
-
- - name: gen-random-int-javascript
- script:
- image: node:9.1-alpine
- command: [node]
- source: |
- var rand = Math.floor(Math.random() * 100);
- console.log(rand);
-
- - name: print-message
- inputs:
- parameters:
- - name: message
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo result was: {{inputs.parameters.message}}"]
-```
-
-The `script` keyword allows the specification of the script body using the `source` tag. This creates a temporary file containing the script body and then passes the name of the temporary file as the final parameter to `command`, which should be an interpreter that executes the script body.
-
-The use of the `script` feature also assigns the standard output of running the script to a special output parameter named `result`. This allows you to use the result of running the script itself in the rest of the workflow spec. In this example, the result is simply echoed by the print-message template.
-
-## Output Parameters
-
-Output parameters provide a general mechanism to use the result of a step as a parameter rather than as an artifact. This allows you to use the result from any type of step, not just a `script`, for conditional tests, loops, and arguments. Output parameters work similarly to `script result` except that the value of the output parameter is set to the contents of a generated file rather than the contents of `stdout`.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: output-parameter-
-spec:
- entrypoint: output-parameter
- templates:
- - name: output-parameter
- steps:
- - - name: generate-parameter
- template: whalesay
- - - name: consume-parameter
- template: print-message
- arguments:
- parameters:
- # Pass the hello-param output from the generate-parameter step as the message input to print-message
- - name: message
- value: "{{steps.generate-parameter.outputs.parameters.hello-param}}"
-
- - name: whalesay
- container:
- image: docker/whalesay:latest
- command: [sh, -c]
- args: ["echo -n hello world > /tmp/hello_world.txt"] # generate the content of hello_world.txt
- outputs:
- parameters:
- - name: hello-param # name of output parameter
- valueFrom:
-        path: /tmp/hello_world.txt # set the value of hello-param to the contents of /tmp/hello_world.txt
-
- - name: print-message
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
-```
-
-DAG templates use the tasks prefix to refer to another task, for example `{{tasks.generate-parameter.outputs.parameters.hello-param}}`.
-
-## Loops
-
-When writing workflows, it is often very useful to be able to iterate over a set of inputs as shown in this example:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: loops-
-spec:
- entrypoint: loop-example
- templates:
- - name: loop-example
- steps:
- - - name: print-message
- template: whalesay
- arguments:
- parameters:
- - name: message
- value: "{{item}}"
- withItems: # invoke whalesay once for each item in parallel
- - hello world # item 1
- - goodbye world # item 2
-
- - name: whalesay
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay:latest
- command: [cowsay]
- args: ["{{inputs.parameters.message}}"]
-```
-
-We can also iterate over sets of items:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: loops-maps-
-spec:
- entrypoint: loop-map-example
- templates:
- - name: loop-map-example
- steps:
- - - name: test-linux
- template: cat-os-release
- arguments:
- parameters:
- - name: image
- value: "{{item.image}}"
- - name: tag
- value: "{{item.tag}}"
- withItems:
- - { image: 'debian', tag: '9.1' } #item set 1
- - { image: 'debian', tag: '8.9' } #item set 2
- - { image: 'alpine', tag: '3.6' } #item set 3
- - { image: 'ubuntu', tag: '17.10' } #item set 4
-
- - name: cat-os-release
- inputs:
- parameters:
- - name: image
- - name: tag
- container:
- image: "{{inputs.parameters.image}}:{{inputs.parameters.tag}}"
- command: [cat]
- args: [/etc/os-release]
-```
-
-We can pass lists of items as parameters:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: loops-param-arg-
-spec:
- entrypoint: loop-param-arg-example
- arguments:
- parameters:
- - name: os-list # a list of items
- value: |
- [
- { "image": "debian", "tag": "9.1" },
- { "image": "debian", "tag": "8.9" },
- { "image": "alpine", "tag": "3.6" },
- { "image": "ubuntu", "tag": "17.10" }
- ]
-
- templates:
- - name: loop-param-arg-example
- inputs:
- parameters:
- - name: os-list
- steps:
- - - name: test-linux
- template: cat-os-release
- arguments:
- parameters:
- - name: image
- value: "{{item.image}}"
- - name: tag
- value: "{{item.tag}}"
- withParam: "{{inputs.parameters.os-list}}" # parameter specifies the list to iterate over
-
- # This template is the same as in the previous example
- - name: cat-os-release
- inputs:
- parameters:
- - name: image
- - name: tag
- container:
- image: "{{inputs.parameters.image}}:{{inputs.parameters.tag}}"
- command: [cat]
- args: [/etc/os-release]
-```
-
-We can even dynamically generate the list of items to iterate over!
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: loops-param-result-
-spec:
- entrypoint: loop-param-result-example
- templates:
- - name: loop-param-result-example
- steps:
- - - name: generate
- template: gen-number-list
- # Iterate over the list of numbers generated by the generate step above
- - - name: sleep
- template: sleep-n-sec
- arguments:
- parameters:
- - name: seconds
- value: "{{item}}"
- withParam: "{{steps.generate.outputs.result}}"
-
- # Generate a list of numbers in JSON format
- - name: gen-number-list
- script:
- image: python:alpine3.6
- command: [python]
- source: |
- import json
- import sys
- json.dump([i for i in range(20, 31)], sys.stdout)
-
- - name: sleep-n-sec
- inputs:
- parameters:
- - name: seconds
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo sleeping for {{inputs.parameters.seconds}} seconds; sleep {{inputs.parameters.seconds}}; echo done"]
-```
-
-## Conditionals
-
-We also support conditional execution. The syntax is implemented by [govaluate](https://github.com/Knetic/govaluate), which offers support for complex expressions. See the example below:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: coinflip-
-spec:
- entrypoint: coinflip
- templates:
- - name: coinflip
- steps:
- # flip a coin
- - - name: flip-coin
- template: flip-coin
- # evaluate the result in parallel
- - - name: heads
- template: heads # call heads template if "heads"
- when: "{{steps.flip-coin.outputs.result}} == heads"
- - name: tails
- template: tails # call tails template if "tails"
- when: "{{steps.flip-coin.outputs.result}} == tails"
- - - name: flip-again
- template: flip-coin
- - - name: complex-condition
- template: heads-tails-or-twice-tails
- # call heads template if first flip was "heads" and second was "tails" OR both were "tails"
- when: >-
- ( {{steps.flip-coin.outputs.result}} == heads &&
- {{steps.flip-again.outputs.result}} == tails
- ) ||
- ( {{steps.flip-coin.outputs.result}} == tails &&
- {{steps.flip-again.outputs.result}} == tails )
- - name: heads-regex
- template: heads # call heads template if ~ "hea"
- when: "{{steps.flip-again.outputs.result}} =~ hea"
- - name: tails-regex
-        template: tails # call tails template if ~ "tai"
- when: "{{steps.flip-again.outputs.result}} =~ tai"
-
- # Return heads or tails based on a random number
- - name: flip-coin
- script:
- image: python:alpine3.6
- command: [python]
- source: |
- import random
- result = "heads" if random.randint(0,1) == 0 else "tails"
- print(result)
-
- - name: heads
- container:
- image: alpine:3.6
- command: [sh, -c]
- args: ["echo \"it was heads\""]
-
- - name: tails
- container:
- image: alpine:3.6
- command: [sh, -c]
- args: ["echo \"it was tails\""]
-
- - name: heads-tails-or-twice-tails
- container:
- image: alpine:3.6
- command: [sh, -c]
- args: ["echo \"it was heads the first flip and tails the second. Or it was two times tails.\""]
-```
-
-!!! note
- If the parameter value contains quotes, it may invalidate the govaluate expression. To handle parameters with
- quotes, embed an [expr](https://github.com/antonmedv/expr) expression in the conditional. For example:
-
- ```yaml
- when: "{{=inputs.parameters['may-contain-quotes'] == 'example'}}"
- ```
-
-## Retrying Failed or Errored Steps
-
-You can specify a `retryStrategy` that will dictate how failed or errored steps are retried:
-
-```yaml
-# This example demonstrates the use of retry backoffs
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: retry-backoff-
-spec:
- entrypoint: retry-backoff
- templates:
- - name: retry-backoff
- retryStrategy:
- limit: 10
- retryPolicy: "Always"
- backoff:
- duration: "1" # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h", "1d"
- factor: 2
- maxDuration: "1m" # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h", "1d"
- affinity:
- nodeAntiAffinity: {}
- container:
- image: python:alpine3.6
- command: ["python", -c]
- # fail with a 66% probability
- args: ["import random; import sys; exit_code = random.choice([0, 1, 1]); sys.exit(exit_code)"]
-```
-
-* `limit` is the maximum number of times the container will be retried.
-* `retryPolicy` specifies if a container will be retried on failure, error, both, or only transient errors (e.g. i/o or TLS handshake timeout). "Always" retries on both errors and failures. Also available: "OnFailure" (default), "OnError", and "OnTransientError" (available after v3.0.0-rc2).
-* `backoff` configures an exponential back-off: the retry `duration` is multiplied by `factor` after each attempt, up to `maxDuration`
-* `nodeAntiAffinity` prevents running steps on the same host. Current implementation allows only empty `nodeAntiAffinity` (i.e. `nodeAntiAffinity: {}`) and by default it uses label `kubernetes.io/hostname` as the selector.
-
-Providing an empty `retryStrategy` (i.e. `retryStrategy: {}`) will cause a container to retry until completion.
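-
-For example, a template fragment (placed in the `templates` list) that retries a flaky, illustrative command until it eventually succeeds:
-
-```yaml
-  - name: retry-until-success
-    retryStrategy: {} # no limit: keep retrying until the container completes successfully
-    container:
-      image: python:alpine3.6
-      command: ["python", "-c"]
-      args: ["import random, sys; sys.exit(random.choice([0, 1, 1]))"]
-```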
-
-## Recursion
-
-Templates can recursively invoke each other! In this variation of the above coin-flip template, we continue to flip coins until it comes up heads.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: coinflip-recursive-
-spec:
- entrypoint: coinflip
- templates:
- - name: coinflip
- steps:
- # flip a coin
- - - name: flip-coin
- template: flip-coin
- # evaluate the result in parallel
- - - name: heads
- template: heads # call heads template if "heads"
- when: "{{steps.flip-coin.outputs.result}} == heads"
- - name: tails # keep flipping coins if "tails"
- template: coinflip
- when: "{{steps.flip-coin.outputs.result}} == tails"
-
- - name: flip-coin
- script:
- image: python:alpine3.6
- command: [python]
- source: |
- import random
- result = "heads" if random.randint(0,1) == 0 else "tails"
- print(result)
-
- - name: heads
- container:
- image: alpine:3.6
- command: [sh, -c]
- args: ["echo \"it was heads\""]
-```
-
-Here's the result of a couple of runs of coinflip for comparison.
-
-```sh
-argo get coinflip-recursive-tzcb5
-
-STEP PODNAME MESSAGE
- ✔ coinflip-recursive-vhph5
- ├───✔ flip-coin coinflip-recursive-vhph5-2123890397
- └─┬─✔ heads coinflip-recursive-vhph5-128690560
- └─○ tails
-
-STEP PODNAME MESSAGE
- ✔ coinflip-recursive-tzcb5
- ├───✔ flip-coin coinflip-recursive-tzcb5-322836820
- └─┬─○ heads
- └─✔ tails
- ├───✔ flip-coin coinflip-recursive-tzcb5-1863890320
- └─┬─○ heads
- └─✔ tails
- ├───✔ flip-coin coinflip-recursive-tzcb5-1768147140
- └─┬─○ heads
- └─✔ tails
- ├───✔ flip-coin coinflip-recursive-tzcb5-4080411136
- └─┬─✔ heads coinflip-recursive-tzcb5-4080323273
- └─○ tails
-```
-
-In the first run, the coin immediately comes up heads and we stop. In the second run, the coin comes up tails three times before it finally comes up heads and we stop.
-
-## Exit Handlers
-
-An exit handler is a template that *always* executes, irrespective of success or failure, at the end of the workflow.
-
-Some common use cases of exit handlers are:
-
-- cleaning up after a workflow runs
-- sending notifications of workflow status (e.g., e-mail/Slack)
-- posting the pass/fail status to a webhook result (e.g. GitHub build result)
-- resubmitting or submitting another workflow
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: exit-handlers-
-spec:
- entrypoint: intentional-fail
- onExit: exit-handler # invoke exit-handler template at end of the workflow
- templates:
- # primary workflow template
- - name: intentional-fail
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo intentional failure; exit 1"]
-
- # Exit handler templates
- # After the completion of the entrypoint template, the status of the
- # workflow is made available in the global variable {{workflow.status}}.
- # {{workflow.status}} will be one of: Succeeded, Failed, Error
- - name: exit-handler
- steps:
- - - name: notify
- template: send-email
- - name: celebrate
- template: celebrate
- when: "{{workflow.status}} == Succeeded"
- - name: cry
- template: cry
- when: "{{workflow.status}} != Succeeded"
- - name: send-email
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo send e-mail: {{workflow.name}} {{workflow.status}} {{workflow.duration}}"]
- - name: celebrate
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo hooray!"]
- - name: cry
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo boohoo!"]
-```
-
-## Timeouts
-
-To limit the elapsed time for a workflow, you can set the field `activeDeadlineSeconds`. It can also be set on an individual template, as in the example below.
-
-```yaml
-# To enforce a timeout for a container template, specify a value for activeDeadlineSeconds.
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: timeouts-
-spec:
- entrypoint: sleep
- templates:
- - name: sleep
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo sleeping for 1m; sleep 60; echo done"]
- activeDeadlineSeconds: 10 # terminate container template after 10 seconds
-```
-
-## Volumes
-
-The following example dynamically creates a volume and then uses the volume in a two step workflow.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: volumes-pvc-
-spec:
- entrypoint: volumes-pvc-example
- volumeClaimTemplates: # define volume, same syntax as k8s Pod spec
- - metadata:
- name: workdir # name of volume claim
- spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 1Gi # Gi => 1024 * 1024 * 1024
-
- templates:
- - name: volumes-pvc-example
- steps:
- - - name: generate
- template: whalesay
- - - name: print
- template: print-message
-
- - name: whalesay
- container:
- image: docker/whalesay:latest
- command: [sh, -c]
- args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
- # Mount workdir volume at /mnt/vol before invoking docker/whalesay
- volumeMounts: # same syntax as k8s Pod spec
- - name: workdir
- mountPath: /mnt/vol
-
- - name: print-message
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
- # Mount workdir volume at /mnt/vol before invoking docker/whalesay
- volumeMounts: # same syntax as k8s Pod spec
- - name: workdir
- mountPath: /mnt/vol
-
-```
-
-Volumes are a very useful way to move large amounts of data from one step in a workflow to another. Depending on the system, some volumes may be accessible concurrently from multiple steps.
-
-In some cases, you want to access an already existing volume rather than creating/destroying one dynamically.
-
-```yaml
-# Define Kubernetes PVC
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
- name: my-existing-volume
-spec:
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 1Gi
-
----
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: volumes-existing-
-spec:
- entrypoint: volumes-existing-example
- volumes:
- # Pass my-existing-volume as an argument to the volumes-existing-example template
- # Same syntax as k8s Pod spec
- - name: workdir
- persistentVolumeClaim:
- claimName: my-existing-volume
-
- templates:
- - name: volumes-existing-example
- steps:
- - - name: generate
- template: whalesay
- - - name: print
- template: print-message
-
- - name: whalesay
- container:
- image: docker/whalesay:latest
- command: [sh, -c]
- args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
- volumeMounts:
- - name: workdir
- mountPath: /mnt/vol
-
- - name: print-message
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
- volumeMounts:
- - name: workdir
- mountPath: /mnt/vol
-```
-
-It's also possible to declare existing volumes at the template level, instead of the workflow level.
-This can be useful for workflows that generate volumes using a `resource` step.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: template-level-volume-
-spec:
- entrypoint: generate-and-use-volume
- templates:
- - name: generate-and-use-volume
- steps:
- - - name: generate-volume
- template: generate-volume
- arguments:
- parameters:
- - name: pvc-size
- # In a real-world example, this could be generated by a previous workflow step.
- value: '1Gi'
- - - name: generate
- template: whalesay
- arguments:
- parameters:
- - name: pvc-name
- value: '{{steps.generate-volume.outputs.parameters.pvc-name}}'
- - - name: print
- template: print-message
- arguments:
- parameters:
- - name: pvc-name
- value: '{{steps.generate-volume.outputs.parameters.pvc-name}}'
-
- - name: generate-volume
- inputs:
- parameters:
- - name: pvc-size
- resource:
- action: create
- setOwnerReference: true
- manifest: |
- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- generateName: pvc-example-
- spec:
- accessModes: ['ReadWriteOnce', 'ReadOnlyMany']
- resources:
- requests:
- storage: '{{inputs.parameters.pvc-size}}'
- outputs:
- parameters:
- - name: pvc-name
- valueFrom:
- jsonPath: '{.metadata.name}'
-
- - name: whalesay
- inputs:
- parameters:
- - name: pvc-name
- volumes:
- - name: workdir
- persistentVolumeClaim:
- claimName: '{{inputs.parameters.pvc-name}}'
- container:
- image: docker/whalesay:latest
- command: [sh, -c]
- args: ["echo generating message in volume; cowsay hello world | tee /mnt/vol/hello_world.txt"]
- volumeMounts:
- - name: workdir
- mountPath: /mnt/vol
-
- - name: print-message
- inputs:
- parameters:
- - name: pvc-name
- volumes:
- - name: workdir
- persistentVolumeClaim:
- claimName: '{{inputs.parameters.pvc-name}}'
- container:
- image: alpine:latest
- command: [sh, -c]
- args: ["echo getting message from volume; find /mnt/vol; cat /mnt/vol/hello_world.txt"]
- volumeMounts:
- - name: workdir
- mountPath: /mnt/vol
-
-```
-
-## Suspending
-
-Workflows can be suspended by
-
-```sh
-argo suspend WORKFLOW
-```
-
-Or by specifying a `suspend` step on the workflow:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: suspend-template-
-spec:
- entrypoint: suspend
- templates:
- - name: suspend
- steps:
- - - name: build
- template: whalesay
- - - name: approve
- template: approve
- - - name: delay
- template: delay
- - - name: release
- template: whalesay
-
- - name: approve
- suspend: {}
-
- - name: delay
- suspend:
- duration: "20" # Must be a string. Default unit is seconds. Could also be a Duration, e.g.: "2m", "6h", "1d"
-
- - name: whalesay
- container:
- image: docker/whalesay
- command: [cowsay]
- args: ["hello world"]
-```
-
-Once suspended, a Workflow will not schedule any new steps until it is resumed. It can be resumed manually with:
-
-```sh
-argo resume WORKFLOW
-```
-
-Or automatically when a `duration` limit is set, as in the example above.
-
-## Daemon Containers
-
-Argo workflows can start containers that run in the background (also known as `daemon containers`) while the workflow itself continues execution. Note that the daemons will be *automatically destroyed* when the workflow exits the template scope in which the daemon was invoked. Daemon containers are useful for starting up services to be tested or to be used in testing (e.g., fixtures). We also find it very useful when running large simulations to spin up a database as a daemon for collecting and organizing the results. The big advantage of daemons compared with sidecars is that their existence can persist across multiple steps or even the entire workflow.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: daemon-step-
-spec:
- entrypoint: daemon-example
- templates:
- - name: daemon-example
- steps:
- - - name: influx
- template: influxdb # start an influxdb as a daemon (see the influxdb template spec below)
-
- - - name: init-database # initialize influxdb
- template: influxdb-client
- arguments:
- parameters:
- - name: cmd
- value: curl -XPOST 'http://{{steps.influx.ip}}:8086/query' --data-urlencode "q=CREATE DATABASE mydb"
-
- - - name: producer-1 # add entries to influxdb
- template: influxdb-client
- arguments:
- parameters:
- - name: cmd
- value: for i in $(seq 1 20); do curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d "cpu,host=server01,region=uswest load=$i" ; sleep .5 ; done
- - name: producer-2 # add entries to influxdb
- template: influxdb-client
- arguments:
- parameters:
- - name: cmd
- value: for i in $(seq 1 20); do curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d "cpu,host=server02,region=uswest load=$((RANDOM % 100))" ; sleep .5 ; done
- - name: producer-3 # add entries to influxdb
- template: influxdb-client
- arguments:
- parameters:
- - name: cmd
- value: curl -XPOST 'http://{{steps.influx.ip}}:8086/write?db=mydb' -d 'cpu,host=server03,region=useast load=15.4'
-
-    - - name: consumer # consume entries from influxdb
- template: influxdb-client
- arguments:
- parameters:
- - name: cmd
- value: curl --silent -G http://{{steps.influx.ip}}:8086/query?pretty=true --data-urlencode "db=mydb" --data-urlencode "q=SELECT * FROM cpu"
-
- - name: influxdb
- daemon: true # start influxdb as a daemon
- retryStrategy:
- limit: 10 # retry container if it fails
- container:
- image: influxdb:1.2
- readinessProbe: # wait for readinessProbe to succeed
- httpGet:
- path: /ping
- port: 8086
-
- - name: influxdb-client
- inputs:
- parameters:
- - name: cmd
- container:
- image: appropriate/curl:latest
- command: ["/bin/sh", "-c"]
- args: ["{{inputs.parameters.cmd}}"]
- resources:
- requests:
- memory: 32Mi
- cpu: 100m
-```
-
-Step templates use the `steps` prefix to refer to another step: for example `{{steps.influx.ip}}`. In DAG templates, the `tasks` prefix is used instead: for example `{{tasks.influx.ip}}`.
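-
-As a rough sketch, the first two steps of the daemon example above could be expressed as DAG tasks like this (only the referencing syntax changes):
-
-```yaml
-  - name: daemon-dag-example
-    dag:
-      tasks:
-      - name: influx
-        template: influxdb # start influxdb as a daemon, as above
-      - name: init-database
-        dependencies: [influx]
-        template: influxdb-client
-        arguments:
-          parameters:
-          - name: cmd
-            value: curl -XPOST 'http://{{tasks.influx.ip}}:8086/query' --data-urlencode "q=CREATE DATABASE mydb"
-```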
-
-## Sidecars
-
-A sidecar is another container that executes concurrently in the same pod as the main container and is useful in creating multi-container pods.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: sidecar-nginx-
-spec:
- entrypoint: sidecar-nginx-example
- templates:
- - name: sidecar-nginx-example
- container:
- image: appropriate/curl
- command: [sh, -c]
- # Try to read from nginx web server until it comes up
- args: ["until `curl -G 'http://127.0.0.1/' >& /tmp/out`; do echo sleep && sleep 1; done && cat /tmp/out"]
- # Create a simple nginx web server
- sidecars:
- - name: nginx
- image: nginx:1.13
-```
-
-In the above example, we create a sidecar container that runs nginx as a simple web server. The order in which containers come up is random, so in this example the main container polls the nginx container until it is ready to service requests. This is a good design pattern when designing multi-container systems: always wait for any services you need to come up before running your main code.
-
-## Hardwired Artifacts
-
-With Argo, you can use any container image that you like to generate any kind of artifact. In practice, however, we find certain types of artifacts are very common, so there is built-in support for git, http, gcs and s3 artifacts.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: hardwired-artifact-
-spec:
- entrypoint: hardwired-artifact
- templates:
- - name: hardwired-artifact
- inputs:
- artifacts:
- # Check out the master branch of the argo repo and place it at /src
- # revision can be anything that git checkout accepts: branch, commit, tag, etc.
- - name: argo-source
- path: /src
- git:
- repo: https://github.com/argoproj/argo-workflows.git
- revision: "master"
- # Download kubectl 1.8.0 and place it at /bin/kubectl
- - name: kubectl
- path: /bin/kubectl
- mode: 0755
- http:
- url: https://storage.googleapis.com/kubernetes-release/release/v1.8.0/bin/linux/amd64/kubectl
- # Copy an s3 compatible artifact repository bucket (such as AWS, GCS and Minio) and place it at /s3
- - name: objects
- path: /s3
- s3:
- endpoint: storage.googleapis.com
- bucket: my-bucket-name
- key: path/in/bucket
- accessKeySecret:
- name: my-s3-credentials
- key: accessKey
- secretKeySecret:
- name: my-s3-credentials
- key: secretKey
- container:
- image: debian
- command: [sh, -c]
- args: ["ls -l /src /bin/kubectl /s3"]
-```
-
-## Kubernetes Resources
-
-In many cases, you will want to manage Kubernetes resources from Argo workflows. The resource template allows you to create, delete, or update any type of Kubernetes resource.
-
-```yaml
-# This example shows how to create a Kubernetes resource from within a workflow. The resource template type accepts any k8s manifest
-# (including CRDs) and can perform any kubectl action against it (e.g. create,
-# apply, delete, patch).
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: k8s-jobs-
-spec:
- entrypoint: pi-tmpl
- templates:
- - name: pi-tmpl
- resource: # indicates that this is a resource template
- action: create # can be any kubectl action (e.g. create, delete, apply, patch)
- # The successCondition and failureCondition are optional expressions.
- # If failureCondition is true, the step is considered failed.
- # If successCondition is true, the step is considered successful.
- # They use kubernetes label selection syntax and can be applied against any field
- # of the resource (not just labels). Multiple AND conditions can be represented by comma
- # delimited expressions.
- # For more details: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
- successCondition: status.succeeded > 0
- failureCondition: status.failed > 3
- manifest: | #put your kubernetes spec here
- apiVersion: batch/v1
- kind: Job
- metadata:
- generateName: pi-job-
- spec:
- template:
- metadata:
- name: pi
- spec:
- containers:
- - name: pi
- image: perl
- command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
- restartPolicy: Never
- backoffLimit: 4
-```
-
-**Note:**
-Currently only a single resource can be managed by a resource template, so either a `generateName` or `name` must be provided in the resource's metadata.
-
-Resources created in this way are independent of the workflow. If you want the resource to be deleted when the workflow is deleted then you can use [Kubernetes garbage collection](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/) with the workflow resource as an owner reference ([example](./k8s-owner-reference.yaml)).
-
-You can also collect data about the resource in output parameters (see more at [k8s-jobs.yaml](./k8s-jobs.yaml)).
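-
-For example, adding an `outputs` section like this to a resource template (such as `pi-tmpl` above) would capture the generated Job's name, using the same `jsonPath` mechanism shown in the volume example earlier:
-
-```yaml
-    outputs:
-      parameters:
-      - name: job-name
-        valueFrom:
-          jsonPath: '{.metadata.name}' # name of the Job created by the resource template
-```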
-
-**Note:**
-When patching, the resource will accept another attribute, `mergeStrategy`, which can either be `strategic`, `merge`, or `json`. If this attribute is not supplied, it will default to `strategic`. Keep in mind that Custom Resources cannot be patched with `strategic`, so a different strategy must be chosen. For example, suppose you have the [CronTab CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#create-a-customresourcedefinition) defined, and the following instance of a CronTab:
-
-```yaml
-apiVersion: "stable.example.com/v1"
-kind: CronTab
-spec:
- cronSpec: "* * * * */5"
- image: my-awesome-cron-image
-```
-
-This CronTab can be modified using the following Argo Workflow:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: k8s-patch-
-spec:
- entrypoint: cront-tmpl
- templates:
- - name: cront-tmpl
- resource:
- action: patch
- mergeStrategy: merge # Must be one of [strategic merge json]
- manifest: |
- apiVersion: "stable.example.com/v1"
- kind: CronTab
- spec:
- cronSpec: "* * * * */10"
- image: my-awesome-cron-image
-```
-
-## Docker-in-Docker Using Sidecars
-
-An application of sidecars is to implement Docker-in-Docker (DinD). DinD is useful when you want to run Docker commands from inside a container. For example, you may want to build and push a container image from inside your build container. In the following example, we use the docker:dind container to run a Docker daemon in a sidecar and give the main container access to the daemon.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: sidecar-dind-
-spec:
- entrypoint: dind-sidecar-example
- templates:
- - name: dind-sidecar-example
- container:
- image: docker:19.03.13
- command: [sh, -c]
- args: ["until docker ps; do sleep 3; done; docker run --rm debian:latest cat /etc/os-release"]
- env:
-      - name: DOCKER_HOST # the docker daemon can be accessed on the standard port on localhost
- value: 127.0.0.1
- sidecars:
- - name: dind
- image: docker:19.03.13-dind # Docker already provides an image for running a Docker daemon
- command: [dockerd-entrypoint.sh]
- env:
- - name: DOCKER_TLS_CERTDIR # Docker TLS env config
- value: ""
- securityContext:
- privileged: true # the Docker daemon can only run in a privileged container
- # mirrorVolumeMounts will mount the same volumes specified in the main container
- # to the sidecar (including artifacts), at the same mountPaths. This enables
- # dind daemon to (partially) see the same filesystem as the main container in
- # order to use features such as docker volume binding.
- mirrorVolumeMounts: true
-```
-
-## Custom Template Variable Reference
-
-This example shows how variable references from another templating language (e.g., Jinja) can be used in an Argo workflow template.
-Argo will validate and resolve only variables that start with one of the allowed prefixes:
-{***"item", "steps", "inputs", "outputs", "workflow", "tasks"***}
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Workflow
-metadata:
- generateName: custom-template-variable-
-spec:
- entrypoint: hello-hello-hello
-
- templates:
- - name: hello-hello-hello
- steps:
- - - name: hello1
- template: whalesay
- arguments:
- parameters: [{name: message, value: "hello1"}]
- - - name: hello2a
- template: whalesay
- arguments:
- parameters: [{name: message, value: "hello2a"}]
- - name: hello2b
- template: whalesay
- arguments:
- parameters: [{name: message, value: "hello2b"}]
-
- - name: whalesay
- inputs:
- parameters:
- - name: message
- container:
- image: docker/whalesay
- command: [cowsay]
- args: ["{{user.username}}"]
-
-```
-
-## Continuous Integration Example
-
-Continuous integration is a popular application for workflows. Currently, Argo does not provide event triggers for automatically kicking off your CI jobs, but we plan to do so in the near future. Until then, you can easily write a cron job that checks for new commits and kicks off the needed workflow, or use your existing Jenkins server to kick off the workflow.
-
-A good example of a CI workflow spec is provided at [influxdb-ci.yaml](https://github.com/argoproj/argo-workflows/tree/master/examples/influxdb-ci.yaml). Because it just uses the concepts that we've already covered and is somewhat long, we don't go into details here.
+This has been moved to [the docs](https://argoproj.github.io/argo-workflows/walk-through/).
\ No newline at end of file
diff --git a/examples/artifact-gc-workflow.yaml b/examples/artifact-gc-workflow.yaml
new file mode 100644
index 000000000000..72b03788cefa
--- /dev/null
+++ b/examples/artifact-gc-workflow.yaml
@@ -0,0 +1,36 @@
+# This example shows how you can configure Artifact Garbage Collection for your Workflow.
+# Here there are two artifacts - one is automatically deleted when the Workflow completes, and the other
+# is deleted when the Workflow gets deleted.
+# In this case, "OnWorkflowDeletion" is defined for all artifacts by default, but the "on-completion" artifact
+# overrides that default strategy.
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+ generateName: artifact-gc-
+spec:
+ entrypoint: main
+ artifactGC:
+ strategy: OnWorkflowDeletion # the overall strategy, which can be overridden
+ templates:
+ - name: main
+ container:
+ image: argoproj/argosay:v2
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ echo "hello world" > /tmp/on-completion.txt
+ echo "hello world" > /tmp/on-deletion.txt
+ outputs:
+ artifacts:
+ - name: on-completion
+ path: /tmp/on-completion.txt
+ s3:
+ key: on-completion.txt
+ artifactGC:
+ strategy: OnWorkflowCompletion # overriding the default strategy for this artifact
+ - name: on-deletion
+ path: /tmp/on-deletion.txt
+ s3:
+ key: on-deletion.txt
diff --git a/examples/artifacts-workflowtemplate.yaml b/examples/artifacts-workflowtemplate.yaml
new file mode 100644
index 000000000000..580d207c82cc
--- /dev/null
+++ b/examples/artifacts-workflowtemplate.yaml
@@ -0,0 +1,135 @@
+apiVersion: argoproj.io/v1alpha1
+kind: WorkflowTemplate
+metadata:
+ name: artifacts
+ annotations:
+ workflows.argoproj.io/description: |
+ This example shows how to produce different types of artifact.
+spec:
+ entrypoint: main
+ templates:
+ - name: main
+ volumes:
+ - name: in
+ emptyDir: { }
+ - name: out
+ emptyDir: { }
+ inputs:
+ artifacts:
+ - name: temps
+ path: /in/annual.csv
+ http:
+ url: https://datahub.io/core/global-temp/r/annual.csv
+ containerSet:
+ volumeMounts:
+ - mountPath: /in
+ name: in
+ - mountPath: /out
+ name: out
+ containers:
+ - name: setup
+ image: argoproj/argosay:v2
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ mkdir -p /out/assets
+ - name: gnuplot
+ image: remuslazar/gnuplot
+ dependencies:
+ - setup
+ args:
+ - -e
+ - |
+ set xlabel 'Year'; set ylabel 'Mean';
+ set grid;
+ set datafile separator ',';
+ set term png size 600,400;
+ set output '/out/assets/global-temp.png';
+ plot '/in/annual.csv' every 2::0 skip 1 using 2:3 title 'Global Temperature' with lines linewidth 2;
+ - name: main
+ image: argoproj/argosay:v2
+ dependencies:
+ - setup
+ command:
+ - sh
+ - -c
+ args:
+ - |
+ cowsay "hello world" > /out/hello.txt
+
+ cat > /out/hello.json < /out/assets/styles.css
+
+ cat > /out/index.html <
+
+
+
+
+