Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 09:38:49 +01:00)

Compare commits: commit-ser...temp-cherr (1 commit, SHA1 708fd99b9a)
.github/dependabot.yml (vendored, 5 lines changed)

@@ -31,11 +31,6 @@ updates:
directory: "/"
schedule:
interval: "daily"
ignore:
# We use consistent go and node versions across a lot of different files, and updating via dependabot would cause
# drift among those files, instead we let renovate bot handle them.
- dependency-name: "library/golang"
- dependency-name: "library/node"

- package-ecosystem: "docker"
directory: "/test/container/"
.github/workflows/ci-build.yaml (vendored, 49 lines changed)

@@ -13,8 +13,7 @@ on:

env:
# Golang version to use across CI steps
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.23.2'
GOLANG_VERSION: '1.22'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}

@@ -32,7 +31,7 @@ jobs:
docs: ${{ steps.filter.outputs.docs_any_changed }}
steps:
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- uses: tj-actions/changed-files@c3a1bb2c992d77180ae65be6ae6c166cf40f857c # v45.0.3
- uses: tj-actions/changed-files@e9772d140489982e0e3704fea5ee93d536f1e275 # v45.0.1
id: filter
with:
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file

@@ -82,7 +81,7 @@ jobs:
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}

@@ -109,10 +108,9 @@ jobs:
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with:
# renovate: datasource=go packageName=github.com/golangci/golangci-lint versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
version: v1.61.0
version: v1.58.2
args: --verbose

test-go:

@@ -153,7 +151,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}

@@ -174,7 +172,7 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate test results artifacts
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: test-results
path: test-results

@@ -217,7 +215,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}

@@ -238,7 +236,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: race-results
path: test-results/

@@ -305,13 +303,12 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup NodeJS
uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
with:
# renovate: datasource=node-version packageName=node versioning=node
node-version: '22.9.0'
node-version: '21.6.1'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}

@@ -351,7 +348,7 @@ jobs:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}

@@ -376,7 +373,7 @@ jobs:
run: |
go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller -o test-results/full-coverage.out
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0
uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0
with:
file: test-results/full-coverage.out
fail_ci_if_error: true

@@ -384,7 +381,7 @@ jobs:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Upload test results to Codecov
if: github.ref == 'refs/heads/master' && github.event_name == 'push' && github.repository == 'argoproj/argo-cd'
uses: codecov/test-results-action@9739113ad922ea0a9abb4b2c0f8bf6a4aa8ef820 # v1.0.1
uses: codecov/test-results-action@1b5b448b98e58ba90d1a1a1d9fcb72ca2263be46 # v1.0.0
with:
file: test-results/junit.xml
fail_ci_if_error: true

@@ -393,7 +390,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
uses: SonarSource/sonarqube-scan-action@884b79409bbd464b2a59edc326a4b77dc56b2195 # v2.2
uses: SonarSource/sonarqube-scan-action@aecaf43ae57e412bd97d70ef9ce6076e672fe0a9 # v2.2
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests

@@ -403,14 +400,14 @@ jobs:
fail-fast: false
matrix:
k3s:
- version: v1.31.0
- version: v1.30.2
# We designate the latest version because we only collect code coverage for that version.
latest: true
- version: v1.30.4
- version: v1.29.6
latest: false
- version: v1.29.8
- version: v1.28.11
latest: false
- version: v1.28.13
- version: v1.27.15
latest: false
needs:
- build-go

@@ -451,7 +448,7 @@ jobs:
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}

@@ -509,13 +506,13 @@ jobs:
goreman run stop-all || echo "goreman trouble"
sleep 30
- name: Upload e2e coverage report
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: e2e-code-coverage
path: /tmp/coverage
if: ${{ matrix.k3s.latest }}
- name: Upload e2e-server logs
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: e2e-server-k8s${{ matrix.k3s.version }}.log
path: /tmp/e2e-server.log
.github/workflows/codeql.yml (vendored, 2 lines changed)

@@ -23,7 +23,7 @@ jobs:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
if: github.repository == 'argoproj/argo-cd' || vars.enable_codeql
if: github.repository == 'argoproj/argo-cd'

# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-22.04
.github/workflows/image-reuse.yaml (vendored, 6 lines changed)

@@ -74,10 +74,10 @@ jobs:
go-version: ${{ inputs.go-version }}

- name: Install cosign
uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0
uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0

- uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
- uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1

- name: Setup tags for container image as a CSV type
run: |

@@ -143,7 +143,7 @@ jobs:

- name: Build and push container image
id: image
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 #v6.9.0
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 #v6.7.0
with:
context: .
platforms: ${{ inputs.platforms }}
.github/workflows/image.yaml (vendored, 6 lines changed)

@@ -52,8 +52,7 @@ jobs:
uses: ./.github/workflows/image-reuse.yaml
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.23.2
go-version: 1.22
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false

@@ -69,8 +68,7 @@ jobs:
quay_image_name: quay.io/argoproj/argocd:latest
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.23.2
go-version: 1.22
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:
.github/workflows/init-release.yaml (vendored, 2 lines changed)

@@ -64,7 +64,7 @@ jobs:
git stash pop

- name: Create pull request
uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
.github/workflows/release.yaml (vendored, 34 lines changed)

@@ -10,8 +10,7 @@ on:
permissions: {}

env:
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.23.2' # Note: go-version must also be set in job argocd-image.with.go-version
GOLANG_VERSION: '1.22' # Note: go-version must also be set in job argocd-image.with.go-version

jobs:
argocd-image:

@@ -24,8 +23,7 @@ jobs:
with:
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.23.2
go-version: 1.22
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
push: true
secrets:

@@ -69,16 +67,20 @@ jobs:
- name: Fetch all tags
run: git fetch --force --tags

- name: Set GORELEASER_PREVIOUS_TAG # Workaround, GoReleaser uses 'git-describe' to determine a previous tag. Our tags are created in realease branches.
run: |
set -xue
if echo ${{ github.ref_name }} | grep -E -- '-rc1+$';then
echo "GORELEASER_PREVIOUS_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n 2 | head -n 1)" >> $GITHUB_ENV
else
echo "This is not the first release on the branch, Using GoReleaser defaults"
fi

- name: Setup Golang
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}

- name: Set GORELEASER_PREVIOUS_TAG # Workaround, GoReleaser uses 'git-describe' to determine a previous tag. Our tags are created in release branches.
run: |
set -xue
echo "GORELEASER_PREVIOUS_TAG=$(go run hack/get-previous-release/get-previous-version-for-release-notes.go ${{ github.ref_name }})" >> $GITHUB_ENV

- name: Set environment variables for ldflags
id: set_ldflag
run: |

@@ -101,7 +103,7 @@ jobs:
args: release --clean --timeout 55m
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }}
KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }}
GIT_TREE_STATE: ${{ env.GIT_TREE_STATE }}

- name: Generate subject for provenance

@@ -184,7 +186,7 @@ jobs:
fi

cd /tmp && tar -zcf sbom.tar.gz *.spdx


- name: Generate SBOM hash
shell: bash
id: sbom-hash

@@ -193,7 +195,7 @@ jobs:
# base64 -w0 encodes to base64 and outputs on a single line.
# sha256sum /tmp/sbom.tar.gz ... | base64 -w0
echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"


- name: Upload SBOM
uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # v2.0.8
env:

@@ -201,7 +203,7 @@ jobs:
with:
files: |
/tmp/sbom.tar.gz


sbom-provenance:
needs: [generate-sbom]
permissions:

@@ -209,13 +211,13 @@ jobs:
id-token: write # Needed for provenance signing and ID
contents: write # Needed for release uploads
if: github.repository == 'argoproj/argo-cd'
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
with:
base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
provenance-name: "argocd-sbom.intoto.jsonl"
upload-assets: true


post-release:
needs:
- argocd-image

@@ -293,7 +295,7 @@ jobs:
if: ${{ env.UPDATE_VERSION == 'true' }}

- name: Create PR to update VERSION on master branch
uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
with:
commit-message: Bump version in master
title: "chore: Bump version in master"
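The @@ -69,16 +67,20 @@ hunk in release.yaml above swaps the shell `git tag --sort` workaround for GORELEASER_PREVIOUS_TAG against a Go helper invoked as `go run hack/get-previous-release/get-previous-version-for-release-notes.go`. That helper's source is not shown in this diff; the sketch below is only a hypothetical illustration of the idea (pick the highest existing tag that sorts before the tag being released), using golang.org/x/mod/semver.

```go
package main

import (
	"fmt"
	"os"
	"sort"

	"golang.org/x/mod/semver"
)

// Hypothetical sketch, not the repository's hack/get-previous-release helper:
// given the tag being released and a list of existing tags, print the highest
// existing tag that sorts strictly before it.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: get-previous-tag <current-tag> [existing-tags...]")
		os.Exit(1)
	}
	current := os.Args[1] // e.g. "v2.13.0-rc1"
	tags := os.Args[2:]   // e.g. the output of `git tag --list 'v*'`

	// Sort ascending by semantic version so the last match below wins.
	sort.Slice(tags, func(i, j int) bool { return semver.Compare(tags[i], tags[j]) < 0 })

	prev := ""
	for _, t := range tags {
		if semver.IsValid(t) && semver.Compare(t, current) < 0 {
			prev = t
		}
	}
	fmt.Println(prev)
}
```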
.github/workflows/scorecard.yaml (vendored, 2 lines changed)

@@ -54,7 +54,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: SARIF file
path: results.sarif
@@ -8,4 +8,4 @@ python:
build:
os: "ubuntu-22.04"
tools:
python: "3.12"
python: "3.7"
@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:3f85b7caad41a95462cf5b787d8
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.23.2@sha256:a7f2fc9834049c1f5df787690026a53738e55fc097cd8a4a93faa3e06c67ee32 AS builder
FROM docker.io/library/golang:1.23.1@sha256:2fe82a3f3e006b4f2a316c6a21f62b66e1330ae211d039bb8d1128e12ed57bf1 AS builder

RUN echo 'deb http://archive.debian.org/debian buster-backports main' >> /etc/apt/sources.list

@@ -83,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.9.0@sha256:69e667a79aa41ec0db50bc452a60e705ca16f35285eaf037ebe627a65a5cdf52 AS argocd-ui
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.8.0@sha256:bd00c03095f7586432805dbf7989be10361d27987f93de904b1fc003949a4794 AS argocd-ui

WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]

@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.23.2@sha256:a7f2fc9834049c1f5df787690026a53738e55fc097cd8a4a93faa3e06c67ee32 AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.23.1@sha256:2fe82a3f3e006b4f2a316c6a21f62b66e1330ae211d039bb8d1128e12ed57bf1 AS argocd-build

WORKDIR /go/src/github.com/argoproj/argo-cd
Makefile (1 line changed)

@@ -486,7 +486,6 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
BIN_MODE=$(ARGOCD_BIN_MODE) \
ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external,argocd-e2e-external-2 \
ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external,argocd-e2e-external-2 \
ARGOCD_APPLICATIONSET_CONTROLLER_TOKENREF_STRICT_MODE=true \
ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS=http://127.0.0.1:8341,http://127.0.0.1:8342,http://127.0.0.1:8343,http://127.0.0.1:8344 \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
Procfile (2 lines changed)

@@ -1,7 +1,7 @@
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/app-controller} HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: hack/start-redis-with-password.sh
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
@@ -8,6 +8,7 @@
[](https://codecov.io/gh/argoproj/argo-cd)
[](https://bestpractices.coreinfrastructure.org/projects/4486)
[](https://scorecard.dev/viewer/?uri=github.com/argoproj/argo-cd)
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-cd?ref=badge_shield)

**Social:**
[](https://twitter.com/argoproj)

@@ -56,7 +57,7 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
### Blogs and Presentations

1. [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://akuity.io/blog/secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argocd/)
1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://akuity.io/blog/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argocd-kubecon-china-2021/)
1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU)
1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM)
1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ)
@@ -3,9 +3,9 @@ header:
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
last-updated: '2023-10-27'
last-reviewed: '2023-10-27'
commit-hash: 74a367d10e7110209610ba3ec225539ebe5f7522
commit-hash: fe606708859574b9b6102a505e260fac5d3fb14e
project-url: https://github.com/argoproj/argo-cd
project-release: v2.14.0
project-release: v2.13.0
changelog: https://github.com/argoproj/argo-cd/releases
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
project-lifecycle:
USERS.md (5 lines changed)

@@ -85,7 +85,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [D2iQ](https://www.d2iq.com)
1. [DaoCloud](https://daocloud.io/)
1. [Datarisk](https://www.datarisk.io/)
1. [Daydream](https://daydream.ing)
1. [Deloitte](https://www.deloitte.com/)
1. [Deutsche Telekom AG](https://telekom.com)
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)

@@ -244,7 +243,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Optoro](https://www.optoro.com/)
1. [Orbital Insight](https://orbitalinsight.com/)
1. [Oscar Health Insurance](https://hioscar.com/)
1. [Outpost24](https://outpost24.com/)
1. [p3r](https://www.p3r.one/)
1. [Packlink](https://www.packlink.com/)
1. [PagerDuty](https://www.pagerduty.com/)

@@ -273,7 +271,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [PT Boer Technology (Btech)](https://btech.id/)
1. [PUBG](https://www.pubg.com)
1. [Puzzle ITC](https://www.puzzle.ch/)
1. [Pvotal Technologies](https://pvotal.tech/)
1. [Qonto](https://qonto.com)
1. [QuintoAndar](https://quintoandar.com.br)
1. [Quipper](https://www.quipper.com/)

@@ -310,7 +307,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Skyscanner](https://www.skyscanner.net/)
1. [Smart Pension](https://www.smartpension.co.uk/)
1. [Smilee.io](https://smilee.io)
1. [Smilegate Stove](https://www.onstove.com/)
1. [Smood.ch](https://www.smood.ch/)
1. [Snapp](https://snapp.ir/)
1. [Snyk](https://snyk.io/)

@@ -334,7 +330,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [TableCheck](https://tablecheck.com/)
1. [Tailor Brands](https://www.tailorbrands.com)
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [TBC Bank](https://tbcbank.ge/)
1. [Techcombank](https://www.techcombank.com.vn/trang-chu)
1. [Technacy](https://www.technacy.it/)
1. [Telavita](https://www.telavita.com.br/)
@@ -52,6 +52,7 @@ import (
"github.com/argoproj/argo-cd/v2/util/db"

argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
argoutil "github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"

@@ -78,6 +79,7 @@ type ApplicationSetReconciler struct {
Recorder record.EventRecorder
Generators map[string]generators.Generator
ArgoDB db.ArgoDB
ArgoAppClientset appclientset.Interface
KubeClientset kubernetes.Interface
Policy argov1alpha1.ApplicationsSyncPolicy
EnablePolicyOverride bool

@@ -95,7 +97,6 @@ type ApplicationSetReconciler struct {
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets/status,verbs=get;update;patch

func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
startReconcile := time.Now()
logCtx := log.WithField("applicationset", req.NamespacedName)

var applicationSetInfo argov1alpha1.ApplicationSet

@@ -333,7 +334,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
requeueAfter = ReconcileRequeueOnValidationError
}

logCtx.WithField("requeueAfter", requeueAfter).Info("end reconcile in ", time.Since(startReconcile))
logCtx.WithField("requeueAfter", requeueAfter).Info("end reconcile")

return ctrl.Result{
RequeueAfter: requeueAfter,

@@ -471,9 +472,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
continue
}

appProject := &argov1alpha1.AppProject{}
err := r.Client.Get(ctx, types.NamespacedName{Name: app.Spec.Project, Namespace: r.ArgoCDNamespace}, appProject)
_, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)

@@ -1485,7 +1484,7 @@ func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
return false
}
requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs)
logCtx.WithField("requeue", requeue).Debugf("requeue: %t caused by application %s", requeue, appNew.Name)
logCtx.WithField("requeue", requeue).Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
return requeue
},
GenericFunc: func(e event.GenericEvent) bool {
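The @@ -471,9 +472,7 @@ hunk above moves the AppProject existence check in validateGeneratedApplications between the controller-runtime client and the typed Argo clientset; either way, a NotFound error is mapped to a validation error. Below is a minimal, self-contained sketch of that lookup-then-IsNotFound pattern. It uses controller-runtime's fake client and a ConfigMap purely for illustration, not Argo CD's actual types or reconciler.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// Register core types so the fake client can serve ConfigMaps.
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	// Seed one "project" object; the second lookup below will miss.
	c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(
		&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"}},
	).Build()

	for _, name := range []string{"good-project", "missing-project"} {
		var cm corev1.ConfigMap
		err := c.Get(context.Background(), types.NamespacedName{Name: name, Namespace: "argocd"}, &cm)
		switch {
		case apierrors.IsNotFound(err):
			// Mirrors the validation error built in the hunk above.
			fmt.Printf("application references project %s which does not exist\n", name)
		case err != nil:
			fmt.Println("unexpected error:", err)
		default:
			fmt.Printf("project %s exists\n", name)
		}
	}
}
```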
@@ -36,8 +36,8 @@ import (
"github.com/argoproj/argo-cd/v2/applicationset/utils"

appsetmetrics "github.com/argoproj/argo-cd/v2/applicationset/metrics"
argocommon "github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"

"github.com/argoproj/argo-cd/v2/pkg/apis/application"

@@ -48,6 +48,9 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

for _, c := range []struct {
// name is human-readable test name
name string

@@ -1088,6 +1091,9 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

for _, c := range []struct {
// name is human-readable test name
name string

@@ -1151,7 +1157,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
Name: "my-secret",
Namespace: "namespace",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
Data: map[string][]byte{

@@ -1208,6 +1214,9 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

for _, c := range []struct {
// name is human-readable test name
name string

@@ -1307,7 +1316,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
Name: "my-secret",
Namespace: "namespace",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
Data: map[string][]byte{

@@ -1362,6 +1371,9 @@ func TestRemoveOwnerReferencesOnDeleteAppSet(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

for _, c := range []struct {
// name is human-readable test name
name string

@@ -1435,6 +1447,9 @@ func TestCreateApplications(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

testCases := []struct {
name string
appSet v1alpha1.ApplicationSet

@@ -1638,6 +1653,8 @@ func TestDeleteInCluster(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

for _, c := range []struct {
// appSet is the application set on which the delete function is called

@@ -1792,6 +1809,8 @@ func TestGetMinRequeueAfter(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

client := fake.NewClientBuilder().WithScheme(scheme).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

@@ -1894,6 +1913,12 @@ func TestValidateGeneratedApplications(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

client := fake.NewClientBuilder().WithScheme(scheme).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

// Valid cluster
myCluster := v1alpha1.Cluster{
Server: "https://kubernetes.default.svc",

@@ -1920,9 +1945,6 @@ func TestValidateGeneratedApplications(t *testing.T) {
},
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(myProject).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

// Test a subset of the validations that 'validateGeneratedApplications' performs
for _, cc := range []struct {
name string

@@ -2053,7 +2075,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
Name: "my-secret",
Namespace: "namespace",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
Data: map[string][]byte{

@@ -2072,15 +2094,21 @@ func TestValidateGeneratedApplications(t *testing.T) {
myCluster,
}}, nil)

argoObjs := []runtime.Object{myProject}
for _, app := range cc.apps {
argoObjs = append(argoObjs, &app)
}

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "namespace",
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "namespace",
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

appSetInfo := v1alpha1.ApplicationSet{}

@@ -2122,6 +2150,8 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

project := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"},

@@ -2160,8 +2190,9 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {

kubeclientset := kubefake.NewSimpleClientset()
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&project}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &project).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}

@@ -2179,11 +2210,12 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Policy: v1alpha1.ApplicationsSyncPolicySync,
ArgoCDNamespace: "argocd",
Metrics: metrics,
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Policy: v1alpha1.ApplicationsSyncPolicySync,
ArgoCDNamespace: "argocd",
Metrics: metrics,
}

req := ctrl.Request{

@@ -2214,6 +2246,8 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

testCases := []struct {
appset v1alpha1.ApplicationSet

@@ -2361,6 +2395,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {

kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

for _, testCase := range testCases {
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&testCase.appset).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).WithStatusSubresource(&testCase.appset).Build()

@@ -2374,9 +2409,10 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

for _, condition := range testCase.conditions {

@@ -2392,6 +2428,8 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

defaultProject := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},

@@ -2431,8 +2469,9 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp

kubeclientset := kubefake.NewSimpleClientset()
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)

@@ -2450,6 +2489,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "argocd",
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Policy: v1alpha1.ApplicationsSyncPolicySync,
EnablePolicyOverride: allowPolicyOverride,

@@ -2553,6 +2593,8 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

defaultProject := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},

@@ -2592,8 +2634,9 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp

kubeclientset := kubefake.NewSimpleClientset()
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)

@@ -2611,6 +2654,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "argocd",
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Policy: v1alpha1.ApplicationsSyncPolicySync,
EnablePolicyOverride: allowPolicyOverride,

@@ -2708,6 +2752,9 @@ func TestPolicies(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

defaultProject := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
Spec: v1alpha1.AppProjectSpec{SourceRepos: []string{"*"}, Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "https://kubernetes.default.svc"}}},

@@ -2720,6 +2767,7 @@ func TestPolicies(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset()
argoDBMock := dbmocks.ArgoDB{}
argoDBMock.On("GetCluster", mock.Anything, "https://kubernetes.default.svc").Return(&myCluster, nil)
argoObjs := []runtime.Object{&defaultProject}

for _, c := range []struct {
name string

@@ -2791,7 +2839,7 @@ func TestPolicies(t *testing.T) {
},
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

r := ApplicationSetReconciler{

@@ -2802,11 +2850,12 @@ func TestPolicies(t *testing.T) {
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "argocd",
KubeClientset: kubeclientset,
Policy: policy,
Metrics: metrics,
ArgoDB: &argoDBMock,
ArgoCDNamespace: "argocd",
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Policy: policy,
Metrics: metrics,
}

req := ctrl.Request{

@@ -2874,9 +2923,12 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

for _, cc := range []struct {
name string

@@ -2960,9 +3012,10 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

err = r.setAppSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appStatuses)

@@ -2978,6 +3031,9 @@ func TestBuildAppDependencyList(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

client := fake.NewClientBuilder().WithScheme(scheme).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

@@ -3710,15 +3766,17 @@ func TestBuildAppDependencyList(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

appDependencyList, appStepMap := r.buildAppDependencyList(log.NewEntry(log.StandardLogger()), cc.appSet, cc.apps)

@@ -3733,6 +3791,9 @@ func TestBuildAppSyncMap(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

client := fake.NewClientBuilder().WithScheme(scheme).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

@@ -4296,15 +4357,17 @@ func TestBuildAppSyncMap(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

appSyncMap := r.buildAppSyncMap(cc.appSet, cc.appDependencyList, cc.appMap)

@@ -4318,6 +4381,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet

@@ -5078,18 +5144,20 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps, cc.appStepMap)

@@ -5110,6 +5178,9 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet

@@ -5827,18 +5898,20 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appSyncMap, cc.appStepMap)

@@ -5859,6 +5932,9 @@ func TestUpdateResourceStatus(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)

for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet

@@ -6038,18 +6114,20 @@ func TestUpdateResourceStatus(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

client := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&cc.appSet).WithObjects(&cc.appSet).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

err := r.updateResourcesStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps)

@@ -6128,18 +6206,20 @@ func TestResourceStatusAreOrdered(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

client := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&cc.appSet).WithObjects(&cc.appSet).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}

err := r.updateResourcesStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps)
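A recurring edit in the test hunks above is wiring the reconciler with `ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...)` next to the existing kubefake clientset. The sketch below shows the general fake-clientset seeding pattern using client-go's kubernetes fake package and a ConfigMap; it is an illustration only and does not use Argo CD's generated clientset.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kubefake "k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Objects passed to NewSimpleClientset are pre-loaded into the fake object
	// tracker, the same way the tests above pass argoObjs (such as an AppProject)
	// to appclientset.NewSimpleClientset.
	objs := []runtime.Object{
		&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"}},
	}
	cs := kubefake.NewSimpleClientset(objs...)

	cm, err := cs.CoreV1().ConfigMaps("argocd").Get(context.Background(), "default", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("seeded object visible through the typed API:", cm.Name)
}
```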
@@ -14,7 +14,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"

"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

@@ -50,7 +50,7 @@ type addRateLimitingInterface[T comparable] interface {

func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Context, q addRateLimitingInterface[reconcile.Request], object client.Object) {
// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
if object.GetLabels()[common.LabelKeySecretType] != common.LabelValueSecretTypeCluster {
if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
return
}
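The hunk above only changes where the cluster-secret label constants come from (common.LabelKeySecretType versus generators.ArgoCDSecretTypeLabel); the filtering logic in queueRelatedAppGenerators is unchanged. A minimal sketch of that check follows; the literal label key and value are assumptions based on Argo CD's documented cluster-secret convention, not values taken from this diff.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed values of the constants referenced in the hunk above.
const (
	secretTypeLabel   = "argocd.argoproj.io/secret-type"
	secretTypeCluster = "cluster"
)

// isClusterSecret mirrors the guard in queueRelatedAppGenerators: only secrets
// labeled as cluster secrets should trigger ApplicationSet requeues.
func isClusterSecret(obj metav1.Object) bool {
	return obj.GetLabels()[secretTypeLabel] == secretTypeCluster
}

func main() {
	s := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{
		Name:      "my-secret",
		Namespace: "argocd",
		Labels:    map[string]string{secretTypeLabel: secretTypeCluster},
	}}
	fmt.Println(isClusterSecret(s)) // true
}
```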
@@ -4,8 +4,6 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
argocommon "github.com/argoproj/argo-cd/v2/common"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -18,6 +16,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -43,7 +42,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -71,7 +70,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -114,7 +113,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -158,7 +157,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -219,7 +218,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -255,7 +254,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -305,7 +304,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -356,7 +355,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -390,7 +389,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -426,7 +425,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -476,7 +475,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -527,7 +526,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -57,7 +57,7 @@ func TestRequeueAfter(t *testing.T) {
},
}
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
scmConfig := generators.NewSCMConfig("", []string{""}, true, nil, true)
scmConfig := generators.NewSCMConfig("", []string{""}, true, nil)
terminalGenerators := map[string]generators.Generator{
"List": generators.NewListGenerator(),
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
@@ -100,8 +100,7 @@ func TestRequeueAfter(t *testing.T) {
}

type args struct {
appset *argov1alpha1.ApplicationSet
requeueAfterOverride string
appset *argov1alpha1.ApplicationSet
}
tests := []struct {
name string
@@ -109,13 +108,11 @@ func TestRequeueAfter(t *testing.T) {
want time.Duration
wantErr assert.ErrorAssertionFunc
}{
{name: "Cluster", args: args{
appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{Clusters: &argov1alpha1.ClusterGenerator{}}},
},
}, requeueAfterOverride: "",
}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
{name: "Cluster", args: args{appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{Clusters: &argov1alpha1.ClusterGenerator{}}},
},
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
{name: "ClusterMergeNested", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
@@ -130,7 +127,7 @@ func TestRequeueAfter(t *testing.T) {
}},
},
},
}, ""}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "ClusterMatrixNested", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
@@ -145,65 +142,15 @@ func TestRequeueAfter(t *testing.T) {
}},
},
},
}, ""}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "ListGenerator", args: args{appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{List: &argov1alpha1.ListGenerator{}}},
},
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
{name: "DuckGenerator", args: args{appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{ClusterDecisionResource: &argov1alpha1.DuckTypeGenerator{}}},
},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "OverrideRequeueDuck", args: args{
appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{ClusterDecisionResource: &argov1alpha1.DuckTypeGenerator{}}},
},
}, requeueAfterOverride: "1h",
}, want: 1 * time.Hour, wantErr: assert.NoError},
{name: "OverrideRequeueGit", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
{Git: &argov1alpha1.GitGenerator{}},
},
},
}, "1h"}, want: 1 * time.Hour, wantErr: assert.NoError},
{name: "OverrideRequeueMatrix", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
{Clusters: &argov1alpha1.ClusterGenerator{}},
{Merge: &argov1alpha1.MergeGenerator{
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
{
Clusters: &argov1alpha1.ClusterGenerator{},
Git: &argov1alpha1.GitGenerator{},
},
},
}},
},
},
}, "5m"}, want: 5 * time.Minute, wantErr: assert.NoError},
{name: "OverrideRequeueMerge", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
{Clusters: &argov1alpha1.ClusterGenerator{}},
{Merge: &argov1alpha1.MergeGenerator{
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
{
Clusters: &argov1alpha1.ClusterGenerator{},
Git: &argov1alpha1.GitGenerator{},
},
},
}},
},
},
}, "12s"}, want: 12 * time.Second, wantErr: assert.NoError},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Setenv("ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER", tt.args.requeueAfterOverride)
assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset)
})
}
@@ -2,7 +2,6 @@ package template

import (
"fmt"
"maps"
"testing"

"github.com/stretchr/testify/mock"
@@ -19,6 +18,7 @@ import (
rendmock "github.com/argoproj/argo-cd/v2/applicationset/utils/mocks"
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/collections"
)

func TestGenerateApplications(t *testing.T) {
@@ -344,7 +344,7 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
assert.EqualValues(t, cases.expectedApp[0].ObjectMeta.Name, gotApp[0].ObjectMeta.Name)
assert.EqualValues(t, cases.expectedApp[0].Spec.Source.TargetRevision, gotApp[0].Spec.Source.TargetRevision)
assert.EqualValues(t, cases.expectedApp[0].Spec.Destination.Namespace, gotApp[0].Spec.Destination.Namespace)
assert.True(t, maps.Equal(cases.expectedApp[0].ObjectMeta.Labels, gotApp[0].ObjectMeta.Labels))
assert.True(t, collections.StringMapsEqual(cases.expectedApp[0].ObjectMeta.Labels, gotApp[0].ObjectMeta.Labels))
})
}
}
@@ -15,10 +15,14 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/argoproj/argo-cd/v2/common"
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

const (
ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type"
ArgoCDSecretTypeCluster = "cluster"
)

var _ Generator = (*ClusterGenerator)(nil)

// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
@@ -88,10 +92,6 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap

secretsFound := []corev1.Secret{}

isFlatMode := appSetGenerator.Clusters.FlatList
log.Debug("Using flat mode = ", isFlatMode, " for cluster generator")
clustersParams := make([]map[string]interface{}, 0)

for _, cluster := range clustersFromArgoCD.Items {
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
// handled by the next step.
@@ -103,18 +103,13 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
params["name"] = cluster.Name
params["nameNormalized"] = cluster.Name
params["server"] = cluster.Server
params["project"] = ""

err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
if err != nil {
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
}

if isFlatMode {
clustersParams = append(clustersParams, params)
} else {
res = append(res, params)
}
res = append(res, params)

log.WithField("cluster", "local cluster").Info("matched local cluster")
}
@@ -128,13 +123,6 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"]))
params["server"] = string(cluster.Data["server"])

project, ok := cluster.Data["project"]
if ok {
params["project"] = string(project)
} else {
params["project"] = ""
}

if appSet.Spec.GoTemplate {
meta := map[string]interface{}{}

@@ -161,20 +149,11 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
return nil, fmt.Errorf("error appending templated values for cluster: %w", err)
}

if isFlatMode {
clustersParams = append(clustersParams, params)
} else {
res = append(res, params)
}
res = append(res, params)

log.WithField("cluster", cluster.Name).Info("matched cluster secret")
}

if isFlatMode {
res = append(res, map[string]interface{}{
"clusters": clustersParams,
})
}
return res, nil
}

@@ -182,7 +161,7 @@ func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1
// List all Clusters:
clusterSecretList := &corev1.SecretList{}

selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, common.LabelKeySecretType, common.LabelValueSecretTypeCluster)
selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, ArgoCDSecretTypeLabel, ArgoCDSecretTypeCluster)
secretSelector, err := metav1.LabelSelectorAsSelector(selector)
if err != nil {
return nil, fmt.Errorf("error converting label selector: %w", err)
@@ -76,20 +76,18 @@ func TestGenerateParams(t *testing.T) {
},
},
Data: map[string][]byte{
"config": []byte("{}"),
"name": []byte("production_01/west"),
"server": []byte("https://production-01.example.com"),
"project": []byte("prod-project"),
"config": []byte("{}"),
"name": []byte("production_01/west"),
"server": []byte("https://production-01.example.com"),
},
Type: corev1.SecretType("Opaque"),
},
}
testCases := []struct {
name string
selector metav1.LabelSelector
isFlatMode bool
values map[string]string
expected []map[string]interface{}
name string
selector metav1.LabelSelector
values map[string]string
expected []map[string]interface{}
// clientError is true if a k8s client error should be simulated
clientError bool
expectedError error
@@ -107,16 +105,17 @@ func TestGenerateParams(t *testing.T) {
"aaa": "{{ server }}",
"no-op": "{{ this-does-not-exist }}",
}, expected: []map[string]interface{}{
{"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "{{ metadata.annotations.foo.argoproj.io }}", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "{{ metadata.labels.environment }}", "values.aaa": "https://kubernetes.default.svc", "nameNormalized": "in-cluster", "name": "in-cluster", "server": "https://kubernetes.default.svc", "project": ""},
{
"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "production", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "production", "values.aaa": "https://production-01.example.com", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production",
},

{
"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "staging", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "staging", "values.aaa": "https://staging-01.example.com", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging",
},

{"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "{{ metadata.annotations.foo.argoproj.io }}", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "{{ metadata.labels.environment }}", "values.aaa": "https://kubernetes.default.svc", "nameNormalized": "in-cluster", "name": "in-cluster", "server": "https://kubernetes.default.svc"},
},
clientError: false,
expectedError: nil,
@@ -132,12 +131,12 @@ func TestGenerateParams(t *testing.T) {
expected: []map[string]interface{}{
{
"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production",
},

{
"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging",
},
},
clientError: false,
@@ -156,7 +155,7 @@ func TestGenerateParams(t *testing.T) {
expected: []map[string]interface{}{
{
"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production",
},
},
clientError: false,
@@ -182,11 +181,11 @@ func TestGenerateParams(t *testing.T) {
expected: []map[string]interface{}{
{
"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging",
},
{
"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production",
},
},
clientError: false,
@@ -215,7 +214,7 @@ func TestGenerateParams(t *testing.T) {
expected: []map[string]interface{}{
{
"values.name": "baz", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging",
},
},
clientError: false,
@@ -229,74 +228,6 @@ func TestGenerateParams(t *testing.T) {
clientError: true,
expectedError: fmt.Errorf("error getting cluster secrets: could not list Secrets"),
},
{
name: "flat mode without selectors",
selector: metav1.LabelSelector{},
values: map[string]string{
"lol1": "lol",
"lol2": "{{values.lol1}}{{values.lol1}}",
"lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}",
"foo": "bar",
"bar": "{{ metadata.annotations.foo.argoproj.io }}",
"bat": "{{ metadata.labels.environment }}",
"aaa": "{{ server }}",
"no-op": "{{ this-does-not-exist }}",
},
expected: []map[string]interface{}{
{
"clusters": []map[string]interface{}{
{"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "{{ metadata.annotations.foo.argoproj.io }}", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "{{ metadata.labels.environment }}", "values.aaa": "https://kubernetes.default.svc", "nameNormalized": "in-cluster", "name": "in-cluster", "server": "https://kubernetes.default.svc", "project": ""},
{
"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "production", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "production", "values.aaa": "https://production-01.example.com", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
},

{
"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "staging", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "staging", "values.aaa": "https://staging-01.example.com", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
},
},
},
},
isFlatMode: true,
clientError: false,
expectedError: nil,
},
{
name: "production or staging with flat mode",
selector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "environment",
Operator: "In",
Values: []string{
"production",
"staging",
},
},
},
},
isFlatMode: true,
values: map[string]string{
"foo": "bar",
},
expected: []map[string]interface{}{
{
"clusters": []map[string]interface{}{
{
"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
},
{
"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
},
},
},
},
clientError: false,
expectedError: nil,
},
}

// convert []client.Object to []runtime.Object, for use by kubefake package
@@ -328,7 +259,6 @@ func TestGenerateParams(t *testing.T) {
Clusters: &argoprojiov1alpha1.ClusterGenerator{
Selector: testCase.selector,
Values: testCase.values,
FlatList: testCase.isFlatMode,
},
}, &applicationSetInfo, nil)
@@ -394,11 +324,10 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
},
}
testCases := []struct {
name string
selector metav1.LabelSelector
values map[string]string
isFlatMode bool
expected []map[string]interface{}
name string
selector metav1.LabelSelector
values map[string]string
expected []map[string]interface{}
// clientError is true if a k8s client error should be simulated
clientError bool
expectedError error
@@ -420,7 +349,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -446,7 +374,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -472,7 +399,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"nameNormalized": "in-cluster",
"name": "in-cluster",
"server": "https://kubernetes.default.svc",
"project": "",
"values": map[string]string{
"lol1": "lol",
"lol2": "<no value><no value>",
@@ -501,7 +427,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -517,7 +442,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -548,7 +472,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -589,7 +512,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -608,7 +530,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -652,7 +573,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -679,162 +599,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
clientError: true,
expectedError: fmt.Errorf("error getting cluster secrets: could not list Secrets"),
},
{
name: "Clusters with flat list mode and no selector",
selector: metav1.LabelSelector{},
isFlatMode: true,
values: map[string]string{
"lol1": "lol",
"lol2": "{{ .values.lol1 }}{{ .values.lol1 }}",
"lol3": "{{ .values.lol2 }}{{ .values.lol2 }}{{ .values.lol2 }}",
"foo": "bar",
"bar": "{{ if not (empty .metadata) }}{{index .metadata.annotations \"foo.argoproj.io\" }}{{ end }}",
"bat": "{{ if not (empty .metadata) }}{{.metadata.labels.environment}}{{ end }}",
"aaa": "{{ .server }}",
"no-op": "{{ .thisDoesNotExist }}",
},
expected: []map[string]interface{}{
{
"clusters": []map[string]interface{}{
{
"nameNormalized": "in-cluster",
"name": "in-cluster",
"server": "https://kubernetes.default.svc",
"project": "",
"values": map[string]string{
"lol1": "lol",
"lol2": "<no value><no value>",
"lol3": "<no value><no value><no value>",
"foo": "bar",
"bar": "",
"bat": "",
"aaa": "https://kubernetes.default.svc",
"no-op": "<no value>",
},
},
{
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "production",
"org": "bar",
},
"annotations": map[string]string{
"foo.argoproj.io": "production",
},
},
"values": map[string]string{
"lol1": "lol",
"lol2": "<no value><no value>",
"lol3": "<no value><no value><no value>",
"foo": "bar",
"bar": "production",
"bat": "production",
"aaa": "https://production-01.example.com",
"no-op": "<no value>",
},
},
{
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "staging",
"org": "foo",
},
"annotations": map[string]string{
"foo.argoproj.io": "staging",
},
},
"values": map[string]string{
"lol1": "lol",
"lol2": "<no value><no value>",
"lol3": "<no value><no value><no value>",
"foo": "bar",
"bar": "staging",
"bat": "staging",
"aaa": "https://staging-01.example.com",
"no-op": "<no value>",
},
},
},
},
},
clientError: false,
expectedError: nil,
},
{
name: "production or staging with flat mode",
selector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "environment",
Operator: "In",
Values: []string{
"production",
"staging",
},
},
},
},
isFlatMode: true,
values: map[string]string{
"foo": "bar",
},
expected: []map[string]interface{}{
{
"clusters": []map[string]interface{}{
{
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "production",
"org": "bar",
},
"annotations": map[string]string{
"foo.argoproj.io": "production",
},
},
"values": map[string]string{
"foo": "bar",
},
},
{
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "staging",
"org": "foo",
},
"annotations": map[string]string{
"foo.argoproj.io": "staging",
},
},
"values": map[string]string{
"foo": "bar",
},
},
},
},
},
clientError: false,
expectedError: nil,
},
}

// convert []client.Object to []runtime.Object, for use by kubefake package
@@ -868,7 +632,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
Clusters: &argoprojiov1alpha1.ClusterGenerator{
Selector: testCase.selector,
Values: testCase.values,
FlatList: testCase.isFlatMode,
},
}, &applicationSetInfo, nil)
@@ -52,7 +52,7 @@ func (g *DuckTypeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.
return time.Duration(*appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds) * time.Second
}

return getDefaultRequeueAfter()
return DefaultRequeueAfterSeconds
}

func (g *DuckTypeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {

@@ -199,7 +199,6 @@ func TestTransForm(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
}},
},
{
@@ -215,7 +214,6 @@ func TestTransForm(t *testing.T) {
"name": "some-really-long-server-url",
"nameNormalized": "some-really-long-server-url",
"server": "https://some-really-long-url-that-will-exceed-63-characters.com",
"project": "",
}},
},
}

@@ -48,7 +48,7 @@ func (g *GitGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Appli
return time.Duration(*appSetGenerator.Git.RequeueAfterSeconds) * time.Second
}

return getDefaultRequeueAfter()
return DefaultRequeueAfterSeconds
}

func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet, client client.Client) ([]map[string]interface{}, error) {

@@ -7,7 +7,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"

argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/env"
)

// Generator defines the interface implemented by all ApplicationSet generators.
@@ -31,11 +30,7 @@ var (
NoRequeueAfter time.Duration
)

// DefaultRequeueAfterSeconds is used when GetRequeueAfter is not specified, it is the default time to wait before the next reconcile loop
const (
DefaultRequeueAfterSeconds = 3 * time.Minute
)

func getDefaultRequeueAfter() time.Duration {
// Default is 3 minutes, min is 1 second, max is 1 year
return env.ParseDurationFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER", DefaultRequeueAfterSeconds, 1*time.Second, 8760*time.Hour)
}
@@ -1,29 +0,0 @@
package generators

import (
"testing"
"time"

"github.com/stretchr/testify/assert"
)

func Test_getDefaultRequeueAfter(t *testing.T) {
tests := []struct {
name string
requeueAfterEnv string
want time.Duration
}{
{name: "Default", requeueAfterEnv: "", want: DefaultRequeueAfterSeconds},
{name: "Min", requeueAfterEnv: "1s", want: 1 * time.Second},
{name: "Max", requeueAfterEnv: "8760h", want: 8760 * time.Hour},
{name: "Override", requeueAfterEnv: "10m", want: 10 * time.Minute},
{name: "LessThanMin", requeueAfterEnv: "1ms", want: DefaultRequeueAfterSeconds},
{name: "MoreThanMax", requeueAfterEnv: "8761h", want: DefaultRequeueAfterSeconds},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Setenv("ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER", tt.requeueAfterEnv)
assert.Equalf(t, tt.want, getDefaultRequeueAfter(), "getDefaultRequeueAfter()")
})
}
}
@@ -578,8 +578,8 @@ func TestInterpolatedMatrixGenerate(t *testing.T) {
},
},
expected: []map[string]interface{}{
{"path": "examples/git-generator-files-discovery/cluster-config/dev/config.json", "path.basename": "dev", "path.basenameNormalized": "dev", "name": "dev-01", "nameNormalized": "dev-01", "server": "https://dev-01.example.com", "metadata.labels.environment": "dev", "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "project": ""},
{"path": "examples/git-generator-files-discovery/cluster-config/prod/config.json", "path.basename": "prod", "path.basenameNormalized": "prod", "name": "prod-01", "nameNormalized": "prod-01", "server": "https://prod-01.example.com", "metadata.labels.environment": "prod", "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "project": ""},
{"path": "examples/git-generator-files-discovery/cluster-config/dev/config.json", "path.basename": "dev", "path.basenameNormalized": "dev", "name": "dev-01", "nameNormalized": "dev-01", "server": "https://dev-01.example.com", "metadata.labels.environment": "dev", "metadata.labels.argocd.argoproj.io/secret-type": "cluster"},
{"path": "examples/git-generator-files-discovery/cluster-config/prod/config.json", "path.basename": "prod", "path.basenameNormalized": "prod", "name": "prod-01", "nameNormalized": "prod-01", "server": "https://prod-01.example.com", "metadata.labels.environment": "prod", "metadata.labels.argocd.argoproj.io/secret-type": "cluster"},
},
clientError: false,
},
@@ -734,7 +734,6 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
"name": "dev-01",
"nameNormalized": "dev-01",
"server": "https://dev-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"environment": "dev",
@@ -751,7 +750,6 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
"name": "prod-01",
"nameNormalized": "prod-01",
"server": "https://prod-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"environment": "prod",
@@ -139,7 +139,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
return nil, fmt.Errorf("error fetching CA certificates from ConfigMap: %w", prErr)
}
}
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -147,7 +147,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.Gitea != nil {
providerConfig := generatorConfig.Gitea
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -164,13 +164,13 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
}
if providerConfig.BearerToken != nil {
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
}
return pullrequest.NewBitbucketServiceBearerToken(ctx, appToken, providerConfig.API, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
} else if providerConfig.BasicAuth != nil {
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -182,13 +182,13 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
if generatorConfig.Bitbucket != nil {
providerConfig := generatorConfig.Bitbucket
if providerConfig.BearerToken != nil {
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
}
return pullrequest.NewBitbucketCloudServiceBearerToken(providerConfig.API, appToken, providerConfig.Owner, providerConfig.Repo)
} else if providerConfig.BasicAuth != nil {
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -199,7 +199,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.AzureDevOps != nil {
providerConfig := generatorConfig.AzureDevOps
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -219,7 +219,7 @@ func (g *PullRequestGenerator) github(ctx context.Context, cfg *argoprojiov1alph
}

// always default to token, even if not set (public access)
token, err := utils.GetSecretRef(ctx, g.client, cfg.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, cfg.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -283,7 +283,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
"gitea.myorg.com",
"bitbucket.myorg.com",
"azuredevops.myorg.com",
}, true, nil, true))
}, true, nil))

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -306,7 +306,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
}

func TestSCMProviderDisabled_PRGenerator(t *testing.T) {
generator := NewPullRequestGenerator(nil, NewSCMConfig("", []string{}, false, nil, true))
generator := NewPullRequestGenerator(nil, NewSCMConfig("", []string{}, false, nil))

applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{

@@ -35,16 +35,14 @@ type SCMConfig struct {
allowedSCMProviders []string
enableSCMProviders bool
GitHubApps github_app_auth.Credentials
tokenRefStrictMode bool
}

func NewSCMConfig(scmRootCAPath string, allowedSCMProviders []string, enableSCMProviders bool, gitHubApps github_app_auth.Credentials, tokenRefStrictMode bool) SCMConfig {
func NewSCMConfig(scmRootCAPath string, allowedSCMProviders []string, enableSCMProviders bool, gitHubApps github_app_auth.Credentials) SCMConfig {
return SCMConfig{
scmRootCAPath: scmRootCAPath,
allowedSCMProviders: allowedSCMProviders,
enableSCMProviders: enableSCMProviders,
GitHubApps: gitHubApps,
tokenRefStrictMode: tokenRefStrictMode,
}
}
@@ -156,7 +154,7 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error fetching CA certificates from ConfigMap: %w", scmError)
}
}
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Gitlab token: %w", err)
}
@@ -165,7 +163,7 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Gitlab service: %w", err)
}
} else if providerConfig.Gitea != nil {
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Gitea token: %w", err)
}
@@ -184,13 +182,13 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
}
}
if providerConfig.BearerToken != nil {
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
}
provider, scmError = scm_provider.NewBitbucketServerProviderBearerToken(ctx, appToken, providerConfig.API, providerConfig.Project, providerConfig.AllBranches, g.scmRootCAPath, providerConfig.Insecure, caCerts)
} else if providerConfig.BasicAuth != nil {
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -202,7 +200,7 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Bitbucket Server service: %w", scmError)
}
} else if providerConfig.AzureDevOps != nil {
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Azure Devops access token: %w", err)
}
@@ -211,7 +209,7 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Azure Devops service: %w", err)
}
} else if providerConfig.Bitbucket != nil {
appPassword, err := utils.GetSecretRef(ctx, g.client, providerConfig.Bitbucket.AppPasswordRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
appPassword, err := utils.GetSecretRef(ctx, g.client, providerConfig.Bitbucket.AppPasswordRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Bitbucket cloud appPassword: %w", err)
}
@@ -285,7 +283,7 @@ func (g *SCMProviderGenerator) githubProvider(ctx context.Context, github *argop
)
}

token, err := utils.GetSecretRef(ctx, g.client, github.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, github.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Github token: %w", err)
}
@@ -82,7 +82,6 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
pr.Repository.Name == nil ||
pr.PullRequestId == nil ||
pr.SourceRefName == nil ||
pr.TargetRefName == nil ||
pr.LastMergeSourceCommit == nil ||
pr.LastMergeSourceCommit.CommitId == nil {
continue
@@ -95,13 +94,12 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {

if *pr.Repository.Name == a.repo {
pullRequests = append(pullRequests, &PullRequest{
Number: *pr.PullRequestId,
Title: *pr.Title,
Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
TargetBranch: strings.Replace(*pr.TargetRefName, "refs/heads/", "", 1),
HeadSHA: *pr.LastMergeSourceCommit.CommitId,
Labels: azureDevOpsLabels,
Author: strings.Split(*pr.CreatedBy.UniqueName, "@")[0], // Get the part before the @ in the email-address
Number: *pr.PullRequestId,
Title: *pr.Title,
Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
HeadSHA: *pr.LastMergeSourceCommit.CommitId,
Labels: azureDevOpsLabels,
Author: strings.Split(*pr.CreatedBy.UniqueName, "@")[0], // Get the part before the @ in the email-address
})
}
}

@@ -72,7 +72,6 @@ func TestListPullRequest(t *testing.T) {
PullRequestId: createIntPtr(pr_id),
Title: createStringPtr(pr_title),
SourceRefName: createStringPtr("refs/heads/feature-branch"),
TargetRefName: createStringPtr("refs/heads/main"),
LastMergeSourceCommit: &git.GitCommitRef{
CommitId: createStringPtr(pr_head_sha),
},
@@ -107,7 +106,6 @@ func TestListPullRequest(t *testing.T) {
require.NoError(t, err)
assert.Len(t, list, 1)
assert.Equal(t, "feature-branch", list[0].Branch)
assert.Equal(t, "main", list[0].TargetBranch)
assert.Equal(t, pr_head_sha, list[0].HeadSHA)
assert.Equal(t, "feat(123)", list[0].Title)
assert.Equal(t, pr_id, list[0].Number)
@@ -30,7 +30,7 @@ func Test_secretToCluster(t *testing.T) {
Data: map[string][]byte{
"name": []byte("test"),
"server": []byte("http://mycluster"),
"config": []byte("{\"username\":\"foo\", \"disableCompression\":true}"),
"config": []byte("{\"username\":\"foo\"}"),
},
}
cluster, err := secretToCluster(secret)
@@ -39,8 +39,7 @@ func Test_secretToCluster(t *testing.T) {
Name: "test",
Server: "http://mycluster",
Config: argoappv1.ClusterConfig{
Username: "foo",
DisableCompression: true,
Username: "foo",
},
}, *cluster)
}
@@ -4,18 +4,14 @@ import (
"context"
"fmt"

"github.com/argoproj/argo-cd/v2/common"

corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

var ErrDisallowedSecretAccess = fmt.Errorf("secret must have label %q=%q", common.LabelKeySecretType, common.LabelValueSecretTypeSCMCreds)

// getSecretRef gets the value of the key for the specified Secret resource.
func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov1alpha1.SecretRef, namespace string, tokenRefStrictMode bool) (string, error) {
func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov1alpha1.SecretRef, namespace string) (string, error) {
if ref == nil {
return "", nil
}
@@ -31,11 +27,6 @@ func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov
if err != nil {
return "", fmt.Errorf("error fetching secret %s/%s: %w", namespace, ref.SecretName, err)
}

if tokenRefStrictMode && secret.GetLabels()[common.LabelKeySecretType] != common.LabelValueSecretTypeSCMCreds {
return "", fmt.Errorf("secret %s/%s is not a valid SCM creds secret: %w", namespace, ref.SecretName, ErrDisallowedSecretAccess)
}

tokenBytes, ok := secret.Data[ref.Key]
if !ok {
return "", fmt.Errorf("key %q in secret %s/%s not found", ref.Key, namespace, ref.SecretName)

@@ -67,7 +67,7 @@ func TestGetSecretRef(t *testing.T) {

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
token, err := GetSecretRef(ctx, client, c.ref, c.namespace, false)
token, err := GetSecretRef(ctx, client, c.ref, c.namespace)
if c.hasError {
require.Error(t, err)
} else {
24
assets/swagger.json
generated
@@ -4719,9 +4719,6 @@
"impersonationEnabled": {
"type": "boolean"
},
"installationID": {
"type": "string"
},
"kustomizeOptions": {
"$ref": "#/definitions/v1alpha1KustomizeOptions"
},
@@ -6617,10 +6614,6 @@
"type": "boolean",
"title": "SkipCrds skips custom resource definition installation step (Helm's --skip-crds)"
},
"skipTests": {
"description": "SkipTests skips test manifest installation step (Helm's --skip-tests).",
"type": "boolean"
},
"valueFiles": {
"type": "array",
"title": "ValuesFiles is a list of Helm value files to use when generating a template",
@@ -7144,20 +7137,12 @@
"description": "Server requires Bearer authentication. This client will not attempt to use\nrefresh tokens for an OAuth2 flow.\nTODO: demonstrate an OAuth2 compatible client.",
"type": "string"
},
"disableCompression": {
"description": "DisableCompression bypasses automatic GZip compression requests to the server.",
"type": "boolean"
},
"execProviderConfig": {
"$ref": "#/definitions/v1alpha1ExecProviderConfig"
},
"password": {
"type": "string"
},
"proxyUrl": {
"type": "string",
"title": "ProxyURL is the URL to the proxy to be used for all requests send to the server"
},
"tlsClientConfig": {
"$ref": "#/definitions/v1alpha1TLSClientConfig"
},
@@ -7171,10 +7156,6 @@
"description": "ClusterGenerator defines a generator to match against clusters registered with ArgoCD.",
"type": "object",
"properties": {
"flatList": {
"type": "boolean",
"title": "returns the clusters a single 'clusters' value in the template"
},
"selector": {
"$ref": "#/definitions/v1LabelSelector"
},
@@ -9186,11 +9167,6 @@
"description": "SyncOperation contains details about a sync operation.",
"type": "object",
"properties": {
"autoHealAttemptsCount": {
"type": "integer",
"format": "int64",
"title": "SelfHealAttemptsCount contains the number of auto-heal attempts"
},
"dryRun": {
"type": "boolean",
"title": "DryRun specifies to perform a `kubectl apply --dry-run` without actually performing the sync"
@@ -13,7 +13,6 @@ import (
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -25,7 +24,6 @@ import (
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
@@ -58,9 +56,6 @@ func NewCommand() *cobra.Command {
repoServerAddress string
repoServerTimeoutSeconds int
selfHealTimeoutSeconds int
selfHealBackoffTimeoutSeconds int
selfHealBackoffFactor int
selfHealBackoffCapSeconds int
statusProcessors int
operationProcessors int
glogLevel int
@@ -83,9 +78,6 @@ func NewCommand() *cobra.Command {
enableDynamicClusterDistribution bool
serverSideDiff bool
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
// argocd k8s event logging flag
enableK8sEvent []string
)
command := cobra.Command{
Use: cliName,
@@ -160,14 +152,6 @@ func NewCommand() *cobra.Command {
kubectl := kubeutil.NewKubectl()
clusterSharding, err := sharding.GetClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
errors.CheckError(err)
var selfHealBackoff *wait.Backoff
if selfHealBackoffTimeoutSeconds != 0 {
selfHealBackoff = &wait.Backoff{
Duration: time.Duration(selfHealBackoffTimeoutSeconds) * time.Second,
Factor: float64(selfHealBackoffFactor),
Cap: time.Duration(selfHealBackoffCapSeconds) * time.Second,
}
}
appController, err = controller.NewApplicationController(
namespace,
settingsMgr,
@@ -180,7 +164,6 @@ func NewCommand() *cobra.Command {
hardResyncDuration,
time.Duration(appResyncJitter)*time.Second,
time.Duration(selfHealTimeoutSeconds)*time.Second,
selfHealBackoff,
time.Duration(repoErrorGracePeriod)*time.Second,
metricsPort,
metricsCacheExpiration,
@@ -194,7 +177,6 @@ func NewCommand() *cobra.Command {
serverSideDiff,
enableDynamicClusterDistribution,
ignoreNormalizerOpts,
enableK8sEvent,
)
errors.CheckError(err)
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
@@ -244,10 +226,7 @@ func NewCommand() *cobra.Command {
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt64), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 0, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
command.Flags().IntVar(&selfHealBackoffTimeoutSeconds, "self-heal-backoff-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_TIMEOUT_SECONDS", 2, 0, math.MaxInt32), "Specifies initial timeout of exponential backoff between self heal attempts")
command.Flags().IntVar(&selfHealBackoffFactor, "self-heal-backoff-factor", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_FACTOR", 3, 0, math.MaxInt32), "Specifies factor of exponential timeout between application self heal attempts")
command.Flags().IntVar(&selfHealBackoffCapSeconds, "self-heal-backoff-cap-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_CAP_SECONDS", 300, 0, math.MaxInt32), "Specifies max timeout of exponential backoff between application self heal attempts")
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 5, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
@@ -272,9 +251,6 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
command.Flags().BoolVar(&serverSideDiff, "server-side-diff-enabled", env.ParseBoolFromEnv(common.EnvServerSideDiff, false), "Feature flag to enable ServerSide diff. Default (\"false\")")
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout-seconds", env.ParseDurationFromEnv("ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT", 0*time.Second, 0, math.MaxInt64), "Set ignore normalizer JQ execution timeout")
// argocd k8s event logging flag
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, cacheutil.Options{
OnClientCreated: func(client *redis.Client) {
redisClient = client
@@ -38,6 +38,7 @@ import (
appsetmetrics "github.com/argoproj/argo-cd/v2/applicationset/metrics"
"github.com/argoproj/argo-cd/v2/applicationset/services"
appv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/errors"
@@ -72,7 +73,6 @@ func NewCommand() *cobra.Command {
metricsAplicationsetLabels []string
enableScmProviders bool
webhookParallelism int
tokenRefStrictMode bool
)
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
@@ -162,9 +162,10 @@ func NewCommand() *cobra.Command {
errors.CheckError(err)
argoSettingsMgr := argosettings.NewSettingsManager(ctx, k8sClient, namespace)
appSetConfig := appclientset.NewForConfigOrDie(mgr.GetConfig())
argoCDDB := db.NewDB(namespace, argoSettingsMgr, k8sClient)
scmConfig := generators.NewSCMConfig(scmRootCAPath, allowedScmProviders, enableScmProviders, github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)), tokenRefStrictMode)
scmConfig := generators.NewSCMConfig(scmRootCAPath, allowedScmProviders, enableScmProviders, github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)))
tlsConfig := apiclient.TLSConfiguration{
DisableTLS: repoServerPlaintext,
@@ -210,6 +211,7 @@ func NewCommand() *cobra.Command {
Renderer: &utils.Render{},
Policy: policyObj,
EnablePolicyOverride: enablePolicyOverride,
ArgoAppClientset: appSetConfig,
KubeClientset: k8sClient,
ArgoDB: argoCDDB,
ArgoCDNamespace: namespace,
@@ -250,7 +252,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&allowedScmProviders, "allowed-scm-providers", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS", []string{}, ","), "The list of allowed custom SCM provider API URLs. This restriction does not apply to SCM or PR generators which do not accept a custom API URL. (Default: Empty = all)")
command.Flags().BoolVar(&enableScmProviders, "enable-scm-providers", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_SCM_PROVIDERS", true), "Enable retrieving information from SCM providers, used by the SCM and PR generators (Default: true)")
command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode")
command.Flags().BoolVar(&tokenRefStrictMode, "token-ref-strict-mode", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_TOKENREF_STRICT_MODE", false), fmt.Sprintf("Set to true to require secrets referenced by SCM providers to have the %s=%s label set (Default: false)", common.LabelKeySecretType, common.LabelValueSecretTypeSCMCreds))
command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.")
command.Flags().BoolVar(&enableNewGitFileGlobbing, "enable-new-git-file-globbing", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING", false), "Enable new globbing in Git files generator.")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
@@ -75,7 +75,6 @@ func NewCommand() *cobra.Command {
helmRegistryMaxIndexSize string
disableManifestMaxExtractedSize bool
includeHiddenDirectories bool
cmpUseManifestGeneratePaths bool
)
command := cobra.Command{
Use: cliName,
@@ -137,7 +136,6 @@ func NewCommand() *cobra.Command {
HelmManifestMaxExtractedSize: helmManifestMaxExtractedSizeQuantity.ToDec().Value(),
HelmRegistryMaxIndexSize: helmRegistryMaxIndexSizeQuantity.ToDec().Value(),
IncludeHiddenDirectories: includeHiddenDirectories,
CMPUseManifestGeneratePaths: cmpUseManifestGeneratePaths,
}, askPassServer)
errors.CheckError(err)
@@ -243,7 +241,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&helmRegistryMaxIndexSize, "helm-registry-max-index-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_INDEX_SIZE", "1G"), "Maximum size of registry index file")
command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted")
command.Flags().BoolVar(&includeHiddenDirectories, "include-hidden-directories", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_INCLUDE_HIDDEN_DIRECTORIES", false), "Include hidden directories from Git")
command.Flags().BoolVar(&cmpUseManifestGeneratePaths, "plugin-use-manifest-generate-paths", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_PLUGIN_USE_MANIFEST_GENERATE_PATHS", false), "Pass the resources described in argocd.argoproj.io/manifest-generate-paths value to the cmpserver to generate the application manifests.")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, cacheutil.Options{
OnClientCreated: func(client *redis.Client) {
@@ -27,7 +27,6 @@ import (
reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache"
"github.com/argoproj/argo-cd/v2/server"
servercache "github.com/argoproj/argo-cd/v2/server/cache"
"github.com/argoproj/argo-cd/v2/util/argo"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/dex"
@@ -92,9 +91,6 @@ func NewCommand() *cobra.Command {
scmRootCAPath string
allowedScmProviders []string
enableScmProviders bool
// argocd k8s event logging flag
enableK8sEvent []string
)
command := &cobra.Command{
Use: cliName,
@@ -233,7 +229,6 @@ func NewCommand() *cobra.Command {
ApplicationNamespaces: applicationNamespaces,
EnableProxyExtension: enableProxyExtension,
WebhookParallelism: webhookParallelism,
EnableK8sEvent: enableK8sEvent,
}
appsetOpts := server.ApplicationSetOpts{
@@ -308,7 +303,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_SERVER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
// Flags related to the applicationSet component.
command.Flags().StringVar(&scmRootCAPath, "appset-scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")
@@ -565,9 +565,7 @@ argocd admin cluster kubeconfig https://cluster-api-url:6443 /path/to/output/kub
cluster, err := db.NewDB(namespace, settings.NewSettingsManager(ctx, kubeclientset, namespace), kubeclientset).GetCluster(ctx, serverUrl)
errors.CheckError(err)
rawConfig, err := cluster.RawRestConfig()
errors.CheckError(err)
err = kube.WriteKubeConfig(rawConfig, namespace, output)
err = kube.WriteKubeConfig(cluster.RawRestConfig(), namespace, output)
errors.CheckError(err)
},
}
@@ -45,14 +45,9 @@ func NewRedisInitialPasswordCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
// redisInitialCredentials is the kubernetes secret containing
// the redis password
redisInitialCredentials := common.RedisInitialCredentials
// redisInitialCredentialsKey is the key in the redisInitialCredentials
// secret which maps to the redis password
redisInitialCredentialsKey := common.RedisInitialCredentialsKey
fmt.Printf("Checking for initial Redis password in secret %s/%s at key %s. \n", namespace, redisInitialCredentials, redisInitialCredentialsKey)
redisInitialPasswordSecretName := common.DefaultRedisInitialPasswordSecretName
redisInitialPasswordKey := common.DefaultRedisInitialPasswordKey
fmt.Printf("Checking for initial Redis password in secret %s/%s at key %s. \n", namespace, redisInitialPasswordSecretName, redisInitialPasswordKey)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
@@ -64,11 +59,11 @@ func NewRedisInitialPasswordCommand() *cobra.Command {
errors.CheckError(err)
data := map[string][]byte{
redisInitialCredentialsKey: []byte(randomPassword),
redisInitialPasswordKey: []byte(randomPassword),
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: redisInitialCredentials,
Name: redisInitialPasswordSecretName,
Namespace: namespace,
},
Data: data,
@@ -79,14 +74,14 @@ func NewRedisInitialPasswordCommand() *cobra.Command {
errors.CheckError(err)
}
fmt.Printf("Argo CD Redis secret state confirmed: secret name %s.\n", redisInitialCredentials)
secret, err = kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), redisInitialCredentials, v1.GetOptions{})
fmt.Println("Argo CD Redis secret state confirmed: secret name argocd-redis.")
secret, err = kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), redisInitialPasswordSecretName, v1.GetOptions{})
errors.CheckError(err)
if _, ok := secret.Data[redisInitialCredentialsKey]; ok {
if _, ok := secret.Data[redisInitialPasswordKey]; ok {
fmt.Println("Password secret is configured properly.")
} else {
err := fmt.Errorf("key %s doesn't exist in secret %s. \n", redisInitialCredentialsKey, redisInitialCredentials)
err := fmt.Errorf("key %s doesn't exist in secret %s. \n", redisInitialPasswordKey, redisInitialPasswordSecretName)
errors.CheckError(err)
}
},
@@ -200,7 +200,8 @@ admissionregistration.k8s.io/MutatingWebhookConfiguration:
require.NoError(t, err)
assert.Contains(t, summary, tc.containsSummary)
} else if tc.containsError != "" {
assert.ErrorContains(t, err, tc.containsError)
require.Error(t, err)
assert.Contains(t, err.Error(), tc.containsError)
}
})
}
@@ -1081,18 +1081,17 @@ func getLocalObjectsString(ctx context.Context, app *argoappv1.Application, proj
) []string {
source := app.Spec.GetSource()
res, err := repository.GenerateManifests(ctx, local, localRepoRoot, source.TargetRevision, &repoapiclient.ManifestRequest{
Repo: &argoappv1.Repository{Repo: source.RepoURL},
AppLabelKey: appLabelKey,
AppName: app.Name,
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &source,
KustomizeOptions: kustomizeOptions,
KubeVersion: kubeVersion,
ApiVersions: apiVersions,
TrackingMethod: trackingMethod,
ProjectName: proj.Name,
ProjectSourceRepos: proj.Spec.SourceRepos,
AnnotationManifestGeneratePaths: app.GetAnnotation(argoappv1.AnnotationKeyManifestGeneratePaths),
Repo: &argoappv1.Repository{Repo: source.RepoURL},
AppLabelKey: appLabelKey,
AppName: app.Name,
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &source,
KustomizeOptions: kustomizeOptions,
KubeVersion: kubeVersion,
ApiVersions: apiVersions,
TrackingMethod: trackingMethod,
ProjectName: proj.Name,
ProjectSourceRepos: proj.Spec.SourceRepos,
}, true, &git.NoopCredsStore{}, resource.MustParse("0"), nil)
errors.CheckError(err)
@@ -1141,7 +1140,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
refresh bool
hardRefresh bool
exitCode bool
diffExitCode int
local string
revision string
localRepoRoot string
@@ -1244,14 +1242,13 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
proj := getProject(c, clientOpts, ctx, app.Spec.Project)
foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
if foundDiffs && exitCode {
os.Exit(diffExitCode)
os.Exit(1)
}
},
}
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff. May also return non-zero exit code if there is an error.")
command.Flags().IntVar(&diffExitCode, "diff-exit-code", 1, "Return specified exit code when there is a diff. Typical error code is 20.")
command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff")
command.Flags().StringVar(&local, "local", "", "Compare live app to a local manifests")
command.Flags().StringVar(&revision, "revision", "", "Compare live app to a particular revision")
command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
@@ -1378,7 +1375,7 @@ func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[
}
if local, ok := objs[key]; ok || live != nil {
if local != nil && !kube.IsCRD(local) {
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()), argoSettings.GetInstallationID())
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()))
errors.CheckError(err)
}
@@ -2,7 +2,6 @@ package commands
import (
"fmt"
"net/http"
"os"
"regexp"
"strings"
@@ -107,11 +106,6 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
contextName := args[0]
conf, err := getRestConfig(pathOpts, contextName)
errors.CheckError(err)
if clusterOpts.ProxyUrl != "" {
u, err := argoappv1.ParseProxyUrl(clusterOpts.ProxyUrl)
errors.CheckError(err)
conf.Proxy = http.ProxyURL(u)
}
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
managerBearerToken := ""
@@ -197,7 +191,6 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
command.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip explicit confirmation")
command.Flags().StringArrayVar(&labels, "label", nil, "Set metadata labels (e.g. --label key=value)")
command.Flags().StringArrayVar(&annotations, "annotation", nil, "Set metadata annotations (e.g. --annotation key=value)")
command.Flags().StringVar(&clusterOpts.ProxyUrl, "proxy-url", "", "use proxy to connect cluster")
cmdutil.AddClusterFlags(command, &clusterOpts)
return command
}
@@ -380,8 +373,6 @@ func printClusterDetails(clusters []argoappv1.Cluster) {
fmt.Printf(" Basic authentication: %v\n", cluster.Config.Username != "")
fmt.Printf(" oAuth authentication: %v\n", cluster.Config.BearerToken != "")
fmt.Printf(" AWS authentication: %v\n", cluster.Config.AWSAuthConfig != nil)
fmt.Printf("\nDisable compression: %v\n", cluster.Config.DisableCompression)
fmt.Printf("\nUse proxy: %v\n", cluster.Config.ProxyUrl != "")
fmt.Println()
}
}
@@ -32,12 +32,11 @@ func Test_printClusterTable(t *testing.T) {
Server: "my-server",
Name: "my-name",
Config: v1alpha1.ClusterConfig{
Username: "my-username",
Password: "my-password",
BearerToken: "my-bearer-token",
TLSClientConfig: v1alpha1.TLSClientConfig{},
AWSAuthConfig: nil,
DisableCompression: false,
Username: "my-username",
Password: "my-password",
BearerToken: "my-bearer-token",
TLSClientConfig: v1alpha1.TLSClientConfig{},
AWSAuthConfig: nil,
},
ConnectionState: v1alpha1.ConnectionState{
Status: "my-status",
@@ -70,7 +70,7 @@ func NewCommand() *cobra.Command {
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", config.GetFlag("server-crt", ""), "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.ClientCertFile, "client-crt", config.GetFlag("client-crt", ""), "Client certificate file")
command.PersistentFlags().StringVar(&clientOpts.ClientCertKeyFile, "client-crt-key", config.GetFlag("client-crt-key", ""), "Client certificate key file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", env.StringFromEnv(common.EnvAuthToken, "")), fmt.Sprintf("Authentication token; set this or the %s environment variable", common.EnvAuthToken))
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", ""), "Authentication token")
command.PersistentFlags().BoolVar(&clientOpts.GRPCWeb, "grpc-web", config.GetBoolFlag("grpc-web"), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2.")
command.PersistentFlags().StringVar(&clientOpts.GRPCWebRootPath, "grpc-web-root-path", config.GetFlag("grpc-web-root-path", ""), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2. Set web root.")
command.PersistentFlags().StringVar(&cmdutil.LogFormat, "logformat", config.GetFlag("logformat", "text"), "Set the logging format. One of: text|json")
12
cmd/main.go
@@ -4,10 +4,10 @@ import (
"os"
"path/filepath"
"github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/spf13/cobra"
_ "go.uber.org/automaxprocs"
appcontroller "github.com/argoproj/argo-cd/v2/cmd/argocd-application-controller/commands"
applicationset "github.com/argoproj/argo-cd/v2/cmd/argocd-applicationset-controller/commands"
cmpserver "github.com/argoproj/argo-cd/v2/cmd/argocd-cmp-server/commands"
@@ -31,12 +31,9 @@ func main() {
if val := os.Getenv(binaryNameEnv); val != "" {
binaryName = val
}
isCLI := false
switch binaryName {
case "argocd", "argocd-linux-amd64", "argocd-darwin-amd64", "argocd-windows-amd64.exe":
command = cli.NewCommand()
isCLI = true
case "argocd-server":
command = apiserver.NewCommand()
case "argocd-application-controller":
@@ -45,24 +42,19 @@ func main() {
command = reposerver.NewCommand()
case "argocd-cmp-server":
command = cmpserver.NewCommand()
isCLI = true
case "argocd-dex":
command = dex.NewCommand()
case "argocd-notifications":
command = notification.NewCommand()
case "argocd-git-ask-pass":
command = gitaskpass.NewCommand()
isCLI = true
case "argocd-applicationset-controller":
command = applicationset.NewCommand()
case "argocd-k8s-auth":
command = k8sauth.NewCommand()
isCLI = true
default:
command = cli.NewCommand()
isCLI = true
}
util.SetAutoMaxProcs(isCLI)
if err := command.Execute(); err != nil {
os.Exit(1)
@@ -9,8 +9,6 @@ import (
"strings"
"time"
"go.uber.org/automaxprocs/maxprocs"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
@@ -50,7 +48,6 @@ type AppOptions struct {
helmVersion string
helmPassCredentials bool
helmSkipCrds bool
helmSkipTests bool
helmNamespace string
helmKubeVersion string
helmApiVersions []string
@@ -91,19 +88,6 @@ type AppOptions struct {
ref string
}
// SetAutoMaxProcs sets the GOMAXPROCS value based on the binary name.
// It suppresses logs for CLI binaries and logs the setting for services.
func SetAutoMaxProcs(isCLI bool) {
if isCLI {
_, _ = maxprocs.Set() // Intentionally ignore errors for CLI binaries
} else {
_, err := maxprocs.Set(maxprocs.Logger(log.Infof))
if err != nil {
log.Errorf("Error setting GOMAXPROCS: %v", err)
}
}
}
func AddAppFlags(command *cobra.Command, opts *AppOptions) {
command.Flags().StringVar(&opts.repoURL, "repo", "", "Repository URL, ignored if a file is set")
command.Flags().StringVar(&opts.appPath, "path", "", "Path in repository to the app directory, ignored if a file is set")
@@ -125,7 +109,6 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
command.Flags().StringArrayVar(&opts.helmSetStrings, "helm-set-string", []string{}, "Helm set STRING values on the command line (can be repeated to set several values: --helm-set-string key1=val1 --helm-set-string key2=val2)")
command.Flags().StringArrayVar(&opts.helmSetFiles, "helm-set-file", []string{}, "Helm set values from respective files specified via the command line (can be repeated to set several values: --helm-set-file key1=path1 --helm-set-file key2=path2)")
command.Flags().BoolVar(&opts.helmSkipCrds, "helm-skip-crds", false, "Skip helm crd installation step")
command.Flags().BoolVar(&opts.helmSkipTests, "helm-skip-tests", false, "Skip helm test manifests installation step")
command.Flags().StringVar(&opts.helmNamespace, "helm-namespace", "", "Helm namespace to use when running helm template. If not set, use app.spec.destination.namespace")
command.Flags().StringVar(&opts.helmKubeVersion, "helm-kube-version", "", "Helm kube-version to use when running helm template. If not set, use the kube version from the destination cluster")
command.Flags().StringArrayVar(&opts.helmApiVersions, "helm-api-versions", []string{}, "Helm api-versions (in format [group/]version/kind) to use when running helm template (Can be repeated to set several values: --helm-api-versions traefik.io/v1alpha1/TLSOption --helm-api-versions v1/Service). If not set, use the api-versions from the destination cluster")
@@ -375,7 +358,6 @@ type helmOpts struct {
helmSetFiles []string
passCredentials bool
skipCrds bool
skipTests bool
namespace string
kubeVersion string
apiVersions []string
@@ -409,9 +391,6 @@ func setHelmOpt(src *argoappv1.ApplicationSource, opts helmOpts) {
if opts.skipCrds {
src.Helm.SkipCrds = opts.skipCrds
}
if opts.skipTests {
src.Helm.SkipTests = opts.skipTests
}
if opts.namespace != "" {
src.Helm.Namespace = opts.namespace
}
@@ -679,8 +658,6 @@ func ConstructSource(source *argoappv1.ApplicationSource, appOpts AppOptions, fl
setHelmOpt(source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
case "helm-skip-crds":
setHelmOpt(source, helmOpts{skipCrds: appOpts.helmSkipCrds})
case "helm-skip-tests":
setHelmOpt(source, helmOpts{skipTests: appOpts.helmSkipTests})
case "helm-namespace":
setHelmOpt(source, helmOpts{namespace: appOpts.helmNamespace})
case "helm-kube-version":
@@ -1,11 +1,10 @@
package util
import (
"bytes"
"log"
"os"
"testing"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -66,11 +65,6 @@ func Test_setHelmOpt(t *testing.T) {
setHelmOpt(&src, helmOpts{skipCrds: true})
assert.True(t, src.Helm.SkipCrds)
})
t.Run("HelmSkipTests", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{skipTests: true})
assert.True(t, src.Helm.SkipTests)
})
t.Run("HelmNamespace", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{namespace: "custom-namespace"})
@@ -535,27 +529,3 @@ func TestFilterResources(t *testing.T) {
assert.Nil(t, filteredResources)
})
}
func TestSetAutoMaxProcs(t *testing.T) {
t.Run("CLI mode ignores errors", func(t *testing.T) {
logBuffer := &bytes.Buffer{}
oldLogger := log.Default()
log.SetOutput(logBuffer)
defer log.SetOutput(oldLogger.Writer())
SetAutoMaxProcs(true)
assert.Empty(t, logBuffer.String(), "Expected no log output when isCLI is true")
})
t.Run("Non-CLI mode logs error on failure", func(t *testing.T) {
logBuffer := &bytes.Buffer{}
oldLogger := log.Default()
log.SetOutput(logBuffer)
defer log.SetOutput(oldLogger.Writer())
SetAutoMaxProcs(false)
assert.NotContains(t, logBuffer.String(), "Error setting GOMAXPROCS", "Unexpected log output detected")
})
}
@@ -100,18 +100,11 @@ func NewCluster(name string, namespaces []string, clusterResources bool, conf *r
TLSClientConfig: tlsClientConfig,
AWSAuthConfig: awsAuthConf,
ExecProviderConfig: execProviderConf,
DisableCompression: conf.DisableCompression,
},
Labels: labels,
Annotations: annotations,
}
// it's a tradeoff to get proxy url from rest config
// more detail: https://github.com/kubernetes/kubernetes/pull/81443
if conf.Proxy != nil {
if url, err := conf.Proxy(nil); err == nil {
clst.Config.ProxyUrl = url.String()
}
}
// Bearer token will preferentially be used for auth if present,
// Even in presence of key/cert credentials
// So set bearer token only if the key/cert data is absent
@@ -165,8 +158,6 @@ type ClusterOptions struct {
ExecProviderAPIVersion string
ExecProviderInstallHint string
ClusterEndpoint string
DisableCompression bool
ProxyUrl string
}
// InClusterEndpoint returns true if ArgoCD should reference the in-cluster
@@ -191,5 +182,4 @@ func AddClusterFlags(command *cobra.Command, opts *ClusterOptions) {
command.Flags().StringVar(&opts.ExecProviderAPIVersion, "exec-command-api-version", "", "Preferred input version of the ExecInfo for the --exec-command executable")
command.Flags().StringVar(&opts.ExecProviderInstallHint, "exec-command-install-hint", "", "Text shown to the user when the --exec-command executable doesn't seem to be present")
command.Flags().StringVar(&opts.ClusterEndpoint, "cluster-endpoint", "", "Cluster endpoint to use. Can be one of the following: 'kubeconfig', 'kube-public', or 'internal'.")
command.Flags().BoolVar(&opts.DisableCompression, "disable-compression", false, "Bypasses automatic GZip compression requests to the server")
}
@@ -37,7 +37,6 @@ func Test_newCluster(t *testing.T) {
assert.Equal(t, "", clusterWithData.Config.BearerToken)
assert.Equal(t, labels, clusterWithData.Labels)
assert.Equal(t, annotations, clusterWithData.Annotations)
assert.False(t, clusterWithData.Config.DisableCompression)
clusterWithFiles := NewCluster("test-cluster", []string{"test-namespace"}, false, &rest.Config{
TLSClientConfig: rest.TLSClientConfig{
@@ -74,20 +73,6 @@ func Test_newCluster(t *testing.T) {
assert.Equal(t, "test-bearer-token", clusterWithBearerToken.Config.BearerToken)
assert.Nil(t, clusterWithBearerToken.Labels)
assert.Nil(t, clusterWithBearerToken.Annotations)
clusterWithDisableCompression := NewCluster("test-cluster", []string{"test-namespace"}, false, &rest.Config{
TLSClientConfig: rest.TLSClientConfig{
Insecure: false,
ServerName: "test-endpoint.example.com",
CAData: []byte("test-ca-data"),
},
DisableCompression: true,
Host: "test-endpoint.example.com",
}, "test-bearer-token",
&v1alpha1.AWSAuthConfig{},
&v1alpha1.ExecProviderConfig{}, labels, annotations)
assert.True(t, clusterWithDisableCompression.Config.DisableCompression)
}
func TestGetKubePublicEndpoint(t *testing.T) {
@@ -451,7 +451,8 @@ func Test_getParametersAnnouncement_invalid_json(t *testing.T) {
Args: []string{`[`},
}
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command, []*apiclient.EnvEntry{})
assert.ErrorContains(t, err, "unexpected end of JSON input")
require.Error(t, err)
assert.Contains(t, err.Error(), "unexpected end of JSON input")
}
func Test_getParametersAnnouncement_bad_command(t *testing.T) {
@@ -460,7 +461,8 @@ func Test_getParametersAnnouncement_bad_command(t *testing.T) {
Args: []string{"1"},
}
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command, []*apiclient.EnvEntry{})
assert.ErrorContains(t, err, "error executing dynamic parameter output command")
require.Error(t, err)
assert.Contains(t, err.Error(), "error executing dynamic parameter output command")
}
func Test_getTempDirMustCleanup(t *testing.T) {
@@ -175,12 +175,9 @@ const (
LabelValueSecretTypeRepository = "repository"
// LabelValueSecretTypeRepoCreds indicates a secret type of repository credentials
LabelValueSecretTypeRepoCreds = "repo-creds"
// LabelValueSecretTypeSCMCreds indicates a secret type of SCM credentials
LabelValueSecretTypeSCMCreds = "scm-creds"
// AnnotationKeyAppInstance is the Argo CD application name is used as the instance name
AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id"
AnnotationInstallationID = "argocd.argoproj.io/installation-id"
// AnnotationCompareOptions is a comma-separated list of options for comparison
AnnotationCompareOptions = "argocd.argoproj.io/compare-options"
@@ -257,8 +254,6 @@ const (
EnvHelmIndexCacheDuration = "ARGOCD_HELM_INDEX_CACHE_DURATION"
// EnvAppConfigPath allows to override the configuration path for repo server
EnvAppConfigPath = "ARGOCD_APP_CONF_PATH"
// EnvAuthToken is the environment variable name for the auth token used by the CLI
EnvAuthToken = "ARGOCD_AUTH_TOKEN"
// EnvLogFormat log format that is defined by `--logformat` option
EnvLogFormat = "ARGOCD_LOG_FORMAT"
// EnvLogLevel log level that is defined by `--loglevel` option
@@ -318,10 +313,7 @@ const (
// Constants used by util/clusterauth package
const (
ClusterAuthRequestTimeout = 10 * time.Second
)
const (
BearerTokenTimeout = 30 * time.Second
BearerTokenTimeout = 30 * time.Second
)
const (
@@ -431,10 +423,8 @@ var PermissionDeniedAPIError = status.Error(codes.PermissionDenied, "permission
// Redis password consts
const (
// RedisInitialCredentials is the name for the argocd kubernetes secret which will have the redis password
RedisInitialCredentials = "argocd-redis"
// RedisInitialCredentialsKey is the key for the argocd kubernetes secret that maps to the redis password
RedisInitialCredentialsKey = "auth"
DefaultRedisInitialPasswordSecretName = "argocd-redis"
DefaultRedisInitialPasswordKey = "auth"
)
/*
@@ -443,17 +433,17 @@ SetOptionalRedisPasswordFromKubeConfig sets the optional Redis password if it ex
We specify kubeClient as kubernetes.Interface to allow for mocking in tests, but this should be treated as a kubernetes.Clientset param.
*/
func SetOptionalRedisPasswordFromKubeConfig(ctx context.Context, kubeClient kubernetes.Interface, namespace string, redisOptions *redis.Options) error {
secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, RedisInitialCredentials, v1.GetOptions{})
secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, DefaultRedisInitialPasswordSecretName, v1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get secret %s/%s: %w", namespace, RedisInitialCredentials, err)
return fmt.Errorf("failed to get secret %s/%s: %w", namespace, DefaultRedisInitialPasswordSecretName, err)
}
if secret == nil {
return fmt.Errorf("failed to get secret %s/%s: secret is nil", namespace, RedisInitialCredentials)
return fmt.Errorf("failed to get secret %s/%s: secret is nil", namespace, DefaultRedisInitialPasswordSecretName)
}
_, ok := secret.Data[RedisInitialCredentialsKey]
_, ok := secret.Data[DefaultRedisInitialPasswordKey]
if !ok {
return fmt.Errorf("secret %s/%s does not contain key %s", namespace, RedisInitialCredentials, RedisInitialCredentialsKey)
return fmt.Errorf("secret %s/%s does not contain key %s", namespace, DefaultRedisInitialPasswordSecretName, DefaultRedisInitialPasswordKey)
}
redisOptions.Password = string(secret.Data[RedisInitialCredentialsKey])
redisOptions.Password = string(secret.Data[DefaultRedisInitialPasswordKey])
return nil
}
@@ -63,24 +63,24 @@ func TestSetOptionalRedisPasswordFromKubeConfig(t *testing.T) {
expectedPassword: "password123",
expectedErr: "",
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: RedisInitialCredentials},
Data: map[string][]byte{RedisInitialCredentialsKey: []byte("password123")},
ObjectMeta: metav1.ObjectMeta{Name: DefaultRedisInitialPasswordSecretName},
Data: map[string][]byte{DefaultRedisInitialPasswordKey: []byte("password123")},
},
},
{
name: "Secret does not exist",
namespace: "default",
expectedPassword: "",
expectedErr: fmt.Sprintf("failed to get secret default/%s", RedisInitialCredentials),
expectedErr: fmt.Sprintf("failed to get secret default/%s", DefaultRedisInitialPasswordSecretName),
secret: nil,
},
{
name: "Secret exists without correct key",
namespace: "default",
expectedPassword: "",
expectedErr: fmt.Sprintf("secret default/%s does not contain key %s", RedisInitialCredentials, RedisInitialCredentialsKey),
expectedErr: fmt.Sprintf("secret default/%s does not contain key %s", DefaultRedisInitialPasswordSecretName, DefaultRedisInitialPasswordKey),
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: RedisInitialCredentials},
ObjectMeta: metav1.ObjectMeta{Name: DefaultRedisInitialPasswordSecretName},
Data: map[string][]byte{},
},
},
@@ -101,7 +101,8 @@ func TestSetOptionalRedisPasswordFromKubeConfig(t *testing.T) {
}
err := SetOptionalRedisPasswordFromKubeConfig(ctx, kubeClient, tc.namespace, redisOptions)
if tc.expectedErr != "" {
require.ErrorContains(t, err, tc.expectedErr)
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedErr)
} else {
require.NoError(t, err)
}
@@ -130,7 +130,7 @@ type ApplicationController struct {
statusHardRefreshTimeout time.Duration
statusRefreshJitter time.Duration
selfHealTimeout time.Duration
selfHealBackOff *wait.Backoff
repoClientset apiclient.Clientset
db db.ArgoDB
settingsMgr *settings_util.SettingsManager
refreshRequestedApps map[string]CompareWith
@@ -160,7 +160,6 @@ func NewApplicationController(
appHardResyncPeriod time.Duration,
appResyncJitter time.Duration,
selfHealTimeout time.Duration,
selfHealBackoff *wait.Backoff,
repoErrorGracePeriod time.Duration,
metricsPort int,
metricsCacheExpiration time.Duration,
@@ -174,7 +173,6 @@ func NewApplicationController(
serverSideDiff bool,
dynamicClusterDistributionEnabled bool,
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
enableK8sEvent []string,
) (*ApplicationController, error) {
log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
db := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -188,6 +186,7 @@ func NewApplicationController(
kubeClientset: kubeClientset,
kubectl: kubectl,
applicationClientset: applicationClientset,
repoClientset: repoClientset,
appRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_reconciliation_queue"}),
appOperationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_operation_processing_queue"}),
projectRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "project_reconciliation_queue"}),
@@ -198,10 +197,9 @@ func NewApplicationController(
statusRefreshJitter: appResyncJitter,
refreshRequestedApps: make(map[string]CompareWith),
refreshRequestedAppsMutex: &sync.Mutex{},
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController, enableK8sEvent),
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
selfHealBackOff: selfHealBackoff,
clusterSharding: clusterSharding,
projByNameCache: sync.Map{},
applicationNamespaces: applicationNamespaces,
@@ -1164,11 +1162,7 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
logCtx.Infof("Resource entries removed from undefined cluster")
return nil
}
clusterRESTConfig, err := cluster.RESTConfig()
if err != nil {
return err
}
config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, clusterRESTConfig)
config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig())
if app.CascadedDeletion() {
logCtx.Infof("Deleting resources")
@@ -1987,9 +1981,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
InitiatedBy: appv1.OperationInitiator{Automated: true},
Retry: appv1.RetryStrategy{Limit: 5},
}
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
}
if app.Spec.SyncPolicy.Retry != nil {
op.Retry = *app.Spec.SyncPolicy.Retry
}
@@ -2007,7 +1998,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
return nil, 0
} else if alreadyAttempted && selfHeal {
if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
op.Sync.SelfHealAttemptsCount++
for _, resource := range resources {
if resource.Status != appv1.SyncStatusCodeSynced {
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
@@ -2126,24 +2116,10 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool,
}
var retryAfter time.Duration
if ctrl.selfHealBackOff == nil {
if app.Status.OperationState.FinishedAt == nil {
retryAfter = ctrl.selfHealTimeout
} else {
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
}
if app.Status.OperationState.FinishedAt == nil {
retryAfter = ctrl.selfHealTimeout
} else {
backOff := *ctrl.selfHealBackOff
backOff.Steps = int(app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
var delay time.Duration
for backOff.Steps > 0 {
delay = backOff.Step()
}
if app.Status.OperationState.FinishedAt == nil {
retryAfter = delay
} else {
retryAfter = delay - time.Since(app.Status.OperationState.FinishedAt.Time)
}
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
}
return retryAfter <= 0, retryAfter
}
@@ -4,18 +4,16 @@ import (
"context"
"encoding/json"
"errors"
"fmt"
"testing"
"time"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
"k8s.io/utils/ptr"
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
"github.com/argoproj/argo-cd/v2/common"
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
@@ -45,15 +43,12 @@ import (
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
"github.com/argoproj/argo-cd/v2/test"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/settings"
)
var testEnableEventList []string = argo.DefaultEnableEventList()
type namespacedResource struct {
v1alpha1.ResourceNode
AppName string
@@ -159,7 +154,6 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
time.Hour,
time.Second,
time.Minute,
nil,
time.Second*10,
common.DefaultPortArgoCDMetrics,
data.metricsCacheExpiration,
@@ -173,7 +167,6 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
false,
false,
normalizers.IgnoreNormalizerOpts{},
testEnableEventList,
)
db := &dbmocks.ArgoDB{}
db.On("GetApplicationControllerReplicas").Return(1)
@@ -2194,66 +2187,3 @@ func TestAlreadyAttemptSync(t *testing.T) {
assert.False(t, attempted)
})
}
func assertDurationAround(t *testing.T, expected time.Duration, actual time.Duration) {
delta := time.Second / 2
assert.GreaterOrEqual(t, expected, actual-delta)
assert.LessOrEqual(t, expected, actual+delta)
}
func TestSelfHealExponentialBackoff(t *testing.T) {
ctrl := newFakeController(&fakeData{}, nil)
ctrl.selfHealBackOff = &wait.Backoff{
Factor: 3,
Duration: 2 * time.Second,
Cap: 5 * time.Minute,
}
app := &v1alpha1.Application{
Status: v1alpha1.ApplicationStatus{
OperationState: &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
},
},
},
}
testCases := []struct {
attempts int64
finishedAt *metav1.Time
expectedDuration time.Duration
shouldSelfHeal bool
}{{
attempts: 0,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 0,
shouldSelfHeal: true,
}, {
attempts: 1,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 2 * time.Second,
shouldSelfHeal: false,
}, {
attempts: 2,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 6 * time.Second,
shouldSelfHeal: false,
}, {
attempts: 3,
finishedAt: nil,
expectedDuration: 18 * time.Second,
shouldSelfHeal: false,
}}
for i := range testCases {
tc := testCases[i]
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = tc.attempts
app.Status.OperationState.FinishedAt = tc.finishedAt
ok, duration := ctrl.shouldSelfHeal(app)
require.Equal(t, ok, tc.shouldSelfHeal)
assertDurationAround(t, tc.expectedDuration, duration)
})
}
}
21
controller/cache/cache.go
vendored
@@ -197,7 +197,6 @@ type cacheSettings struct {
clusterSettings clustercache.Settings
appInstanceLabelKey string
trackingMethod appv1.TrackingMethod
installationID string
// resourceOverrides provides a list of ignored differences to ignore watched resource updates
resourceOverrides map[string]appv1.ResourceOverride
@@ -226,10 +225,6 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
if err != nil {
return nil, err
}
installationID, err := c.settingsMgr.GetInstallationID()
if err != nil {
return nil, err
}
resourceUpdatesOverrides, err := c.settingsMgr.GetIgnoreResourceUpdatesOverrides()
if err != nil {
return nil, err
@@ -251,7 +246,7 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
ResourcesFilter: resourcesFilter,
}
return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), installationID, resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
}
func asResourceNode(r *clustercache.Resource) appv1.ResourceNode {
@@ -495,10 +490,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
return nil, fmt.Errorf("error getting value for %v: %w", settings.RespectRBAC, err)
}
clusterCacheConfig, err := cluster.RESTConfig()
if err != nil {
return nil, fmt.Errorf("error getting cluster RESTConfig: %w", err)
}
clusterCacheConfig := cluster.RESTConfig()
// Controller dynamically fetches all resource types available on the cluster
// using a discovery API that may contain deprecated APIs.
// This causes log flooding when managing a large number of clusters.
@@ -531,7 +523,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
res.Health, _ = health.GetResourceHealth(un, cacheSettings.clusterSettings.ResourceHealthOverride)
appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod, cacheSettings.installationID)
appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod)
if isRoot && appName != "" {
res.AppName = appName
}
@@ -829,12 +821,7 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a
var updateSettings []clustercache.UpdateSettingsFunc
if !reflect.DeepEqual(oldCluster.Config, newCluster.Config) {
newClusterRESTConfig, err := newCluster.RESTConfig()
if err == nil {
updateSettings = append(updateSettings, clustercache.SetConfig(newClusterRESTConfig))
} else {
log.Errorf("error getting cluster REST config: %v", err)
}
updateSettings = append(updateSettings, clustercache.SetConfig(newCluster.RESTConfig()))
}
if !reflect.DeepEqual(oldCluster.Namespaces, newCluster.Namespaces) {
updateSettings = append(updateSettings, clustercache.SetNamespaces(newCluster.Namespaces))
18  controller/cache/info.go  vendored
@@ -66,8 +66,6 @@ func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLa
|
||||
switch gvk.Kind {
|
||||
case "VirtualService":
|
||||
populateIstioVirtualServiceInfo(un, res)
|
||||
case "ServiceEntry":
|
||||
populateIstioServiceEntryInfo(un, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -280,22 +278,6 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
|
||||
}
|
||||
|
||||
func populateIstioServiceEntryInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
targetLabels, ok, err := unstructured.NestedStringMap(un.Object, "spec", "workloadSelector", "labels")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{
|
||||
TargetLabels: targetLabels,
|
||||
TargetRefs: []v1alpha1.ResourceRef{{
|
||||
Kind: kube.PodKind,
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
pod := v1.Pod{}
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
|
||||
|
||||
38  controller/cache/info_test.go  vendored
@@ -246,29 +246,6 @@ spec:
|
||||
- destination:
|
||||
host: service
|
||||
`)
|
||||
|
||||
testIstioServiceEntry = strToUnstructured(`
|
||||
apiVersion: networking.istio.io/v1beta1
|
||||
kind: ServiceEntry
|
||||
metadata:
|
||||
name: echo
|
||||
spec:
|
||||
exportTo:
|
||||
- '*'
|
||||
hosts:
|
||||
- echo.internal
|
||||
location: MESH_INTERNAL
|
||||
ports:
|
||||
- name: http
|
||||
number: 80
|
||||
protocol: HTTP
|
||||
targetPort: 5678
|
||||
resolution: DNS
|
||||
|
||||
workloadSelector:
|
||||
labels:
|
||||
app.kubernetes.io/name: echo-2
|
||||
`)
|
||||
)
|
||||
|
||||
func TestGetPodInfo(t *testing.T) {
|
||||
@@ -378,21 +355,6 @@ func TestGetIstioVirtualServiceInfo(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetIstioServiceEntryInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testIstioServiceEntry, info, []string{})
|
||||
assert.Empty(t, info.Info)
|
||||
require.NotNil(t, info.NetworkingInfo)
|
||||
require.NotNil(t, info.NetworkingInfo.TargetRefs)
|
||||
assert.Contains(t, info.NetworkingInfo.TargetRefs, v1alpha1.ResourceRef{
|
||||
Kind: kube.PodKind,
|
||||
})
|
||||
|
||||
assert.Equal(t, map[string]string{
|
||||
"app.kubernetes.io/name": "echo-2",
|
||||
}, info.NetworkingInfo.TargetLabels)
|
||||
}
|
||||
|
||||
func TestGetIngressInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
Ingress *unstructured.Unstructured
|
||||
|
||||
@@ -161,11 +161,6 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
return nil, nil, false, fmt.Errorf("failed to get Helm settings: %w", err)
|
||||
}
|
||||
|
||||
installationID, err := m.settingsMgr.GetInstallationID()
|
||||
if err != nil {
|
||||
return nil, nil, false, fmt.Errorf("failed to get installation ID: %w", err)
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("build_options_ms")
|
||||
serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
|
||||
if err != nil {
|
||||
@@ -235,7 +230,6 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
RefSources: refSources,
|
||||
HasMultipleSources: app.Spec.HasMultipleSources(),
|
||||
InstallationID: installationID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, false, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
|
||||
@@ -255,29 +249,27 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
|
||||
log.Debugf("Generating Manifest for source %s revision %s", source, revision)
|
||||
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
|
||||
Repo: repo,
|
||||
Repos: permittedHelmRepos,
|
||||
Revision: revision,
|
||||
NoCache: noCache,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &source,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
|
||||
VerifySignature: verifySignature,
|
||||
HelmRepoCreds: permittedHelmCredentials,
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
EnabledSourceTypes: enabledSourceTypes,
|
||||
HelmOptions: helmOptions,
|
||||
HasMultipleSources: app.Spec.HasMultipleSources(),
|
||||
RefSources: refSources,
|
||||
ProjectName: proj.Name,
|
||||
ProjectSourceRepos: proj.Spec.SourceRepos,
|
||||
AnnotationManifestGeneratePaths: app.GetAnnotation(v1alpha1.AnnotationKeyManifestGeneratePaths),
|
||||
InstallationID: installationID,
|
||||
Repo: repo,
|
||||
Repos: permittedHelmRepos,
|
||||
Revision: revision,
|
||||
NoCache: noCache,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &source,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
|
||||
VerifySignature: verifySignature,
|
||||
HelmRepoCreds: permittedHelmCredentials,
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
EnabledSourceTypes: enabledSourceTypes,
|
||||
HelmOptions: helmOptions,
|
||||
HasMultipleSources: app.Spec.HasMultipleSources(),
|
||||
RefSources: refSources,
|
||||
ProjectName: proj.Name,
|
||||
ProjectSourceRepos: proj.Spec.SourceRepos,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, false, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
|
||||
@@ -361,24 +353,20 @@ func DeduplicateTargetObjects(
|
||||
|
||||
// getComparisonSettings will return the system level settings related to the
|
||||
// diff/normalization process.
|
||||
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, string, error) {
|
||||
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, error) {
|
||||
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
|
||||
if err != nil {
|
||||
return "", nil, nil, "", err
|
||||
return "", nil, nil, err
|
||||
}
|
||||
appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()
|
||||
if err != nil {
|
||||
return "", nil, nil, "", err
|
||||
return "", nil, nil, err
|
||||
}
|
||||
resFilter, err := m.settingsMgr.GetResourcesFilter()
|
||||
if err != nil {
|
||||
return "", nil, nil, "", err
|
||||
return "", nil, nil, err
|
||||
}
|
||||
installationID, err := m.settingsMgr.GetInstallationID()
|
||||
if err != nil {
|
||||
return "", nil, nil, "", err
|
||||
}
|
||||
return appLabelKey, resourceOverrides, resFilter, installationID, nil
|
||||
return appLabelKey, resourceOverrides, resFilter, nil
|
||||
}
|
||||
|
||||
// verifyGnuPGSignature verifies the result of a GnuPG operation for a given git
|
||||
@@ -429,7 +417,7 @@ func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application
|
||||
// revision and overrides in the app spec.
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error) {
|
||||
ts := stats.NewTimingStats()
|
||||
appLabelKey, resourceOverrides, resFilter, installationID, err := m.getComparisonSettings()
|
||||
appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
|
||||
|
||||
ts.AddCheckpoint("settings_ms")
|
||||
|
||||
@@ -597,7 +585,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
|
||||
for _, liveObj := range liveObjByKey {
|
||||
if liveObj != nil {
|
||||
appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod, installationID)
|
||||
appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod)
|
||||
if appInstanceName != "" && appInstanceName != app.InstanceName(m.namespace) {
|
||||
fqInstanceName := strings.ReplaceAll(appInstanceName, "_", "/")
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{
|
||||
@@ -736,7 +724,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
}
|
||||
gvk := obj.GroupVersionKind()
|
||||
|
||||
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod, installationID)
|
||||
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod)
|
||||
|
||||
resState := v1alpha1.ResourceStatus{
|
||||
Namespace: obj.GetNamespace(),
|
||||
@@ -1041,7 +1029,7 @@ func NewAppStateManager(
|
||||
// group and kind) match the properties of the live object, or if the tracking method
|
||||
// used does not provide the required properties for matching.
|
||||
// Reference: https://github.com/argoproj/argo-cd/issues/8683
|
||||
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod, installationID string) bool {
|
||||
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
|
||||
if live == nil {
|
||||
return true
|
||||
}
|
||||
@@ -1074,7 +1062,7 @@ func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstruc
|
||||
// to match the properties from the live object. Cluster scoped objects
|
||||
// carry the app's destination namespace in the tracking annotation,
|
||||
// but are unique in GVK + name combination.
|
||||
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod, installationID)
|
||||
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod)
|
||||
if appInstance != nil {
|
||||
return isSelfReferencedObj(live, *appInstance)
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
|
||||
"github.com/argoproj/argo-cd/v2/test"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
)
|
||||
@@ -679,6 +680,7 @@ func TestCompareAppStateWithManifestGeneratePath(t *testing.T) {
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
assert.Equal(t, "abc123", compRes.syncStatus.Revision)
|
||||
ctrl.repoClientset.(*mockrepoclient.Clientset).RepoServerServiceClient.(*mockrepoclient.RepoServerServiceClient).AssertNumberOfCalls(t, "UpdateRevisionForPaths", 1)
|
||||
}
|
||||
|
||||
func TestSetHealth(t *testing.T) {
|
||||
@@ -1370,8 +1372,8 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
configObj := managedObj.DeepCopy()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will return true if tracked with label", func(t *testing.T) {
|
||||
// given
|
||||
@@ -1379,43 +1381,43 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
configObj := managedObjWithLabel.DeepCopy()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong resource name and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong resource group and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong kind and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong namespace and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel))
|
||||
})
|
||||
t.Run("will return true if live is nil", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
|
||||
t.Run("will handle upgrade in desired state APIGroup", func(t *testing.T) {
|
||||
@@ -1425,7 +1427,7 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
delete(config.GetAnnotations(), common.AnnotationKeyAppInstance)
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -80,12 +80,7 @@ func (m *appStateManager) getResourceOperations(server string) (kube.ResourceOpe
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error getting cluster: %w", err)
|
||||
}
|
||||
|
||||
rawConfig, err := cluster.RawRestConfig()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error getting cluster REST config: %w", err)
|
||||
}
|
||||
ops, cleanup, err := m.kubectl.ManageResources(rawConfig, clusterCache.GetOpenAPISchema())
|
||||
ops, cleanup, err := m.kubectl.ManageResources(cluster.RawRestConfig(), clusterCache.GetOpenAPISchema())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error creating kubectl ResourceOperations: %w", err)
|
||||
}
|
||||
@@ -228,20 +223,8 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return
|
||||
}
|
||||
|
||||
rawConfig, err := clst.RawRestConfig()
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = err.Error()
|
||||
return
|
||||
}
|
||||
|
||||
clusterRESTConfig, err := clst.RESTConfig()
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = err.Error()
|
||||
return
|
||||
}
|
||||
restConfig := metrics.AddMetricsTransportWrapper(m.metricsServer, app, clusterRESTConfig)
|
||||
rawConfig := clst.RawRestConfig()
|
||||
restConfig := metrics.AddMetricsTransportWrapper(m.metricsServer, app, clst.RESTConfig())
|
||||
|
||||
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
|
||||
if err != nil {
|
||||
@@ -312,11 +295,6 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
log.Errorf("Could not get appInstanceLabelKey: %v", err)
|
||||
return
|
||||
}
|
||||
installationID, err := m.settingsMgr.GetInstallationID()
|
||||
if err != nil {
|
||||
log.Errorf("Could not get installation ID: %v", err)
|
||||
return
|
||||
}
|
||||
trackingMethod := argo.GetTrackingMethod(m.settingsMgr)
|
||||
|
||||
impersonationEnabled, err := m.settingsMgr.IsImpersonationEnabled()
|
||||
@@ -368,7 +346,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return (len(syncOp.Resources) == 0 ||
|
||||
isPostDeleteHook(target) ||
|
||||
argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) &&
|
||||
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod, installationID)
|
||||
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod)
|
||||
}),
|
||||
sync.WithManifestValidation(!syncOp.SyncOptions.HasOption(common.SyncOptionsDisableValidation)),
|
||||
sync.WithSyncWaveHook(delayBetweenSyncWaves),
|
||||
|
||||
Binary file not shown. (Before: 62 KiB image)
@@ -21,16 +21,10 @@ cd $GOPATH/src/github.com/argoproj &&
|
||||
git clone https://github.com/argoproj/argo-cd.git
|
||||
```
|
||||
|
||||
### Install Docker or Podman
|
||||
|
||||
#### Installation guide for docker:
|
||||
### Install Docker
|
||||
|
||||
<https://docs.docker.com/engine/install/>
|
||||
|
||||
#### Installation guide for podman:
|
||||
|
||||
<https://podman.io/docs/installation>
|
||||
|
||||
### Install or Upgrade a Tool for Running Local Clusters (e.g. kind or minikube)
|
||||
|
||||
#### Installation guide for kind:
|
||||
@@ -54,12 +48,6 @@ Or, if you are using minikube:
|
||||
minikube start
|
||||
```
|
||||
|
||||
Or, if you are using minikube with podman driver:
|
||||
|
||||
```shell
|
||||
minikube start --driver=podman
|
||||
```
|
||||
|
||||
### Install Argo CD
|
||||
|
||||
```shell
|
||||
@@ -89,13 +77,6 @@ cd argo-cd
|
||||
make start-local ARGOCD_GPG_ENABLED=false
|
||||
```
|
||||
|
||||
By default, Argo CD uses Docker. To use Podman instead, set the `DOCKER` environment variable to `podman` before running the `make` command:
|
||||
|
||||
```shell
|
||||
cd argo-cd
|
||||
DOCKER=podman make start-local ARGOCD_GPG_ENABLED=false
|
||||
```
|
||||
|
||||
- Navigate to [localhost:4000](http://localhost:4000) in your browser to load the Argo CD UI
|
||||
- It may take a few minutes for the UI to be responsive
|
||||
|
||||
@@ -105,13 +86,6 @@ DOCKER=podman make start-local ARGOCD_GPG_ENABLED=false
|
||||
|
||||
## Making Changes
|
||||
|
||||
### Docs Changes
|
||||
|
||||
Modifying the docs auto-reloads the changes on the [documentation website](https://argo-cd.readthedocs.io/) that can be locally built using `make serve-docs` command.
|
||||
Once running, you can view your locally built documentation on port 8000.
|
||||
|
||||
Read more about this [here](https://argo-cd.readthedocs.io/en/latest/developer-guide/docs-site/).
|
||||
|
||||
### UI Changes
|
||||
|
||||
Modifying the User-Interface (by editing .tsx or .scss files) auto-reloads the changes on port 4000.
|
||||
|
||||
@@ -17,25 +17,6 @@ Before submitting a PR build the website, to verify that there are no errors bui
|
||||
make build-docs
|
||||
```
|
||||
|
||||
If you want to build and test the site directly on your local machine without the use of docker container, follow the below steps:
|
||||
|
||||
1. Install the `mkdocs` using the `pip` command
|
||||
```bash
|
||||
pip install mkdocs
|
||||
```
|
||||
2. Install the required dependencies using the below command
|
||||
```bash
|
||||
pip install $(mkdocs get-deps)
|
||||
```
|
||||
3. Build the docs site locally from the root
|
||||
```bash
|
||||
make build-docs-local
|
||||
```
|
||||
4. Start the docs site locally
|
||||
```bash
|
||||
make serve-docs-local
|
||||
```
|
||||
|
||||
## Analytics
|
||||
|
||||
!!! tip
|
||||
|
||||
@@ -1,8 +1,5 @@
# Proxy Extensions

!!! warning "Alpha Feature (Since 2.7.0)"
    This is an experimental, [alpha-quality](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#alpha)
    feature. It may be removed in future releases or modified in backwards-incompatible ways.
*Current Status: [Alpha][1] (Since v2.7.0)*

## Overview

@@ -1,21 +1,21 @@
# E2E Tests

The test [directory](https://github.com/argoproj/argo-cd/tree/master/test) contains E2E tests and test applications. The tests assume that Argo CD services are installed into `argocd-e2e` namespace or cluster in current context. A throw-away
namespace `argocd-e2e***` is created prior to the execution of the tests. The throw-away namespace is used as a target namespace for test applications.
The directory contains E2E tests and test applications. The test assume that Argo CD services are installed into `argocd-e2e` namespace or cluster in current context. One throw-away
namespace `argocd-e2e***` is created prior to tests execute. The throw-away namespace is used as a target namespace for test applications.

The [/test/e2e/testdata](https://github.com/argoproj/argo-cd/tree/master/test/e2e/testdata) directory contains various Argo CD applications. Before test execution directory is copies into `/tmp/argocd-e2e***` temp directory and used in tests as a
The `test/e2e/testdata` directory contains various Argo CD applications. Before test execution directory is copies into `/tmp/argocd-e2e***` temp directory and used in tests as a
Git repository via file url: `file:///tmp/argocd-e2e***`.

## Running Tests Locally

1. Start the e2e version `make start-e2e`
2. Run the tests: `make test-e2e`
1. Start the e2e version `make start-e2e`
1. Run the tests: `make test-e2e`

You can observe the tests by using the UI [http://localhost:8080/applications](http://localhost:8080/applications) with username `"admin"` and password `"password"`.

## Configuration of E2E Tests execution

The Makefile's `start-e2e` target starts instances of ArgoCD on your local machine, of which the most will require a network listener. If, for any reason, your machine already has network services listening on the same ports, then the e2e tests will not run. You can derive from the defaults by setting the following environment variables before you run `make start-e2e`:
The Makefile's `start-e2e` target starts instances of ArgoCD on your local machine, of which the most will require a network listener. If for whatever reason you already have network services on your machine listening on the same ports, the e2e tests will not be able to run. You can derive from the defaults by setting the following environment variables before you run `make start-e2e`:

* `ARGOCD_E2E_APISERVER_PORT`: Listener port for `argocd-server` (default: `8080`)
* `ARGOCD_E2E_REPOSERVER_PORT`: Listener port for `argocd-reposerver` (default: `8081`)

@@ -141,9 +141,6 @@ service account token to perform its management tasks (i.e. deploy/monitoring).
|
||||
An example repository containing a guestbook application is available at
|
||||
[https://github.com/argoproj/argocd-example-apps.git](https://github.com/argoproj/argocd-example-apps.git) to demonstrate how Argo CD works.
|
||||
|
||||
!!! note
|
||||
Note: The following example application may only be compatible with AMD64 architecture. If you are running on a different architecture (such as ARM64 or ARMv7), you may encounter issues with dependencies or container images that are not built for your platform. Consider verifying the compatibility of the application or building architecture-specific images if necessary.
|
||||
|
||||
### Creating Apps Via CLI
|
||||
|
||||
First we need to set the current namespace to argocd running the following command:
|
||||
|
||||
@@ -1,10 +1,7 @@
# Application Sync using impersonation

!!! warning "Alpha Feature (Since 2.13.0)"
    This is an experimental, [alpha-quality](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#alpha)
    feature that allows you to control the service account used for the sync operation. The configured service account
    could have lesser privileges required for creating resources compared to the highly privileged access required for
    the control plane operations.
!!! warning "Alpha Feature"
    This is an experimental, alpha-quality feature that allows you to control the service account used for the sync operation. The configured service account could have lesser privileges required for creating resources compared to the highly privileged access required for the control plane operations.

!!! warning
    Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues.

@@ -3,6 +3,16 @@ kind: ApplicationSet
|
||||
metadata:
|
||||
name: test-hello-world-appset
|
||||
namespace: argocd
|
||||
# To preserve this annotation and label we can use the preservedFields property
|
||||
preservedFields:
|
||||
# This annotation and label exists only on this Application, and not in
|
||||
# the parent ApplicationSet template:
|
||||
# ignoreApplicationDifferences is the preferred way to accomplish this now.
|
||||
annotations:
|
||||
my-custom-annotation: some-value
|
||||
labels:
|
||||
my-custom-label: some-value
|
||||
|
||||
spec:
|
||||
generators:
|
||||
|
||||
@@ -158,17 +168,29 @@ spec:
|
||||
applicationsSync: create-only
|
||||
|
||||
# Prevents ApplicationSet controller from deleting Applications. Update is allowed
|
||||
# applicationsSync: create-update
|
||||
# applicationsSync: create-update
|
||||
|
||||
# Prevents ApplicationSet controller from modifying Applications. Delete is allowed.
|
||||
# applicationsSync: create-delete
|
||||
# applicationsSync: create-delete
|
||||
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
# Prevent an Application's child resources from being deleted, when the parent Application is deleted
|
||||
preserveResourcesOnDeletion: true
|
||||
|
||||
# which fields of the ApplicationSet should be ignored when comparing Applications.
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
- name: some-app
|
||||
jqExpressions:
|
||||
- .spec.source.helm.values
|
||||
|
||||
strategy:
|
||||
# The RollingSync update strategy allows you to group Applications by labels present on the generated Application resources
|
||||
# See documentation for "Progressive Syncs"
|
||||
# This field lets you define fields which should be ignored when applying Application resources. This is helpful if you
|
||||
# want to use ApplicationSets to create apps, but also want to allow users to modify those apps without having their
|
||||
# changes overwritten by the ApplicationSet.
|
||||
# This update strategy allows you to group Applications by labels present on the generated Application resources
|
||||
type: RollingSync
|
||||
rollingSync:
|
||||
steps:
|
||||
@@ -192,13 +214,6 @@ spec:
|
||||
- env-prod
|
||||
maxUpdate: 10% # maxUpdate supports both integer and percentage string values (rounds down, but floored at 1 Application for >0%)
|
||||
|
||||
# Define annotations and labels of the Application that this ApplicationSet will ignore
|
||||
# ignoreApplicationDifferences is the preferred way to accomplish this now.
|
||||
preservedFields:
|
||||
annotations: [ some-annotation-key ]
|
||||
labels: [ some-label-key ]
|
||||
|
||||
# Define fields of the that should be ignored when comparing Applications
|
||||
ignoreApplicationDifferences:
|
||||
- jsonPointers:
|
||||
- /spec/source/targetRevision
|
||||
@@ -296,4 +311,4 @@ spec:
|
||||
operator: In
|
||||
values:
|
||||
- https://kubernetes.default.svc
|
||||
- https://some-other-cluster
|
||||
- https://some-other-cluster
|
||||
@@ -1,8 +1,6 @@
|
||||
# ApplicationSet in any namespace
|
||||
|
||||
!!! warning "Beta Feature (Since v2.8.0)"
|
||||
This feature is in the [Beta](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#beta) stage.
|
||||
It is generally considered stable, but there may be unhandled edge cases.
|
||||
**Current feature state**: Beta
|
||||
|
||||
!!! warning
|
||||
Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues.
|
||||
@@ -79,29 +77,6 @@ data:
|
||||
|
||||
If you do not intend to allow users to use the SCM or PR generators, you can disable them entirely by setting the environment variable `ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_SCM_PROVIDERS` to argocd-cmd-params-cm `applicationsetcontroller.enable.scm.providers` to `false`.
|
||||
|
||||
#### `tokenRef` Restrictions
|
||||
|
||||
It is **highly recommended** to enable SCM Providers secrets restrictions to avoid any secrets exfiltration. This
|
||||
recommendation applies even when AppSets-in-any-namespace is disabled, but is especially important when it is enabled,
|
||||
since non-Argo-admins may attempt to reference out-of-bounds secrets in the `argocd` namespace from an AppSet
|
||||
`tokenRef`.
|
||||
|
||||
When this mode is enabled, the referenced secret must have a label `argocd.argoproj.io/secret-type` with value
|
||||
`scm-creds`.
|
||||
|
||||
To enable this mode, set the `ARGOCD_APPLICATIONSET_CONTROLLER_TOKENREF_STRICT_MODE` environment variable to `true` in the
|
||||
`argocd-application-controller` deployment. You can do this by adding the following to your `argocd-cmd-paramscm`
|
||||
ConfigMap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cmd-params-cm
|
||||
data:
|
||||
applicationsetcontroller.tokenref.strict.mode: "true"
|
||||
```
|
||||
|
||||
### Overview
|
||||
|
||||
In order for an ApplicationSet to be managed and reconciled outside the Argo CD's control plane namespace, two prerequisites must match:
|
||||
|
||||
@@ -9,7 +9,6 @@ It automatically provides the following parameter values to the Application temp
|
||||
- `name`
|
||||
- `nameNormalized` *('name' but normalized to contain only lowercase alphanumeric characters, '-' or '.')*
|
||||
- `server`
|
||||
- `project` *(the Secret's 'project' field, if present; otherwise, it defaults to '')*
|
||||
- `metadata.labels.<key>` *(for each label in the Secret)*
|
||||
- `metadata.annotations.<key>` *(for each annotation in the Secret)*
|
||||
|
||||
@@ -253,63 +252,3 @@ spec:
|
||||
server: '{{.values.clusterName}}'
|
||||
namespace: guestbook
|
||||
```
|
||||
### Gather cluster information as a flat list
|
||||
|
||||
You may sometimes need to gather your clusters information, without having to deploy one application per cluster found.
|
||||
For that, you can use the option `flatList` in the cluster generator.
|
||||
|
||||
Here is an example of cluster generator using this option:
|
||||
```yaml
|
||||
spec:
|
||||
goTemplate: true
|
||||
goTemplateOptions: ["missingkey=error"]
|
||||
generators:
|
||||
- clusters:
|
||||
selector:
|
||||
matchLabels:
|
||||
type: 'staging'
|
||||
flatList: true
|
||||
template:
|
||||
metadata:
|
||||
name: 'flat-list-guestbook'
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
# The cluster values field for each generator will be substituted here:
|
||||
targetRevision: 'HEAD'
|
||||
path: helm-guestbook
|
||||
helm:
|
||||
values: |
|
||||
clusters:
|
||||
{{- range .clusters }}
|
||||
- name: {{ .name }}
|
||||
{{- end }}
|
||||
destination:
|
||||
# In this case this is equivalent to just using {{name}}
|
||||
server: 'my-cluster'
|
||||
namespace: guestbook
|
||||
```
|
||||
|
||||
Given that you have two cluster secrets matching with names cluster1 and cluster2, this would generate the **single** following Application:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: flat-list-guestbook
|
||||
namespace: guestbook
|
||||
spec:
|
||||
project: "my-project"
|
||||
source:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps/
|
||||
targetRevision: 'HEAD'
|
||||
path: helm-guestbook
|
||||
helm:
|
||||
values: |
|
||||
clusters:
|
||||
- name: cluster1
|
||||
- name: cluster1
|
||||
```
|
||||
|
||||
In case you are using several cluster generators, each with the flatList option, one Application would be generated by cluster generator, as we can't simply merge values and templates that would potentially differ in each generator.
|
||||
@@ -1,9 +1,7 @@
|
||||
# Progressive Syncs
|
||||
|
||||
!!! warning "Alpha Feature (Since v2.6.0)"
|
||||
This is an experimental, [alpha-quality](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#alpha)
|
||||
feature that allows you to control the order in which the ApplicationSet controller will create or update the Applications
|
||||
owned by an ApplicationSet resource. It may be removed in future releases or modified in backwards-incompatible ways.
|
||||
!!! warning "Alpha Feature"
|
||||
This is an experimental, alpha-quality feature that allows you to control the order in which the ApplicationSet controller will create or update the Applications owned by an ApplicationSet resource. It may be removed in future releases or modified in backwards-incompatible ways.
|
||||
|
||||
## Use Cases
|
||||
The Progressive Syncs feature set is intended to be light and flexible. The feature only interacts with the health of managed Applications. It is not intended to support direct integrations with other Rollout controllers (such as the native ReplicaSet controller or Argo Rollouts).
|
||||
|
||||
@@ -107,8 +107,8 @@ data:
|
||||
- /spec/replicas
|
||||
|
||||
# Enable resource.customizations.ignoreResourceUpdates rules. If "false," those rules are not applied, and all updates
|
||||
# to resources are applied to the cluster cache. Default is true.
|
||||
resource.ignoreResourceUpdatesEnabled: "true"
|
||||
# to resources are applied to the cluster cache. Default is false.
|
||||
resource.ignoreResourceUpdatesEnabled: "false"
|
||||
|
||||
# Configuration to define customizations ignoring differences during watched resource updates to skip application reconciles.
|
||||
resource.customizations.ignoreResourceUpdates.all: |
|
||||
@@ -283,9 +283,6 @@ data:
|
||||
# - annotation+label : Also uses an annotation for tracking, but additionally labels the resource with the application name
|
||||
application.resourceTrackingMethod: annotation
|
||||
|
||||
# Optional installation id. Allows to have multiple installations of Argo CD in the same cluster.
|
||||
installationID: "my-unique-id"
|
||||
|
||||
# disables admin user. Admin is enabled by default
|
||||
admin.enabled: "false"
|
||||
# add an additional local user with apiKey and login capabilities
|
||||
|
||||
@@ -47,11 +47,8 @@ data:
|
||||
controller.log.level: "info"
|
||||
# Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)
|
||||
controller.metrics.cache.expiration: "24h0m0s"
|
||||
# Specifies exponential backoff timeout parameters between application self heal attempts
|
||||
controller.self.heal.timeout.seconds: "2"
|
||||
controller.self.heal.backoff.factor: "3"
|
||||
controller.self.heal.backoff.cap.seconds: "300"
|
||||
|
||||
# Specifies timeout between application self heal attempts (default 5)
|
||||
controller.self.heal.timeout.seconds: "5"
|
||||
# Cache expiration for app state (default 1h0m0s)
|
||||
controller.app.state.cache.expiration: "1h0m0s"
|
||||
# Specifies if resource health should be persisted in app CRD (default true)
|
||||
@@ -174,8 +171,6 @@ data:
|
||||
reposerver.max.combined.directory.manifests.size: '10M'
|
||||
# Paths to be excluded from the tarball streamed to plugins. Separate with ;
|
||||
reposerver.plugin.tar.exclusions: ""
|
||||
# Enable the repo server to use the 'argocd.argoproj.io/manifest-generate-paths' annotation to guide manifest generation.
|
||||
reposerver.plugin.use.manifest.generate.paths: "false"
|
||||
# Allow repositories to contain symlinks that leave the boundaries of the repository.
|
||||
# Changing this to "true" will not allow _all_ out-of-bounds symlinks. Those will still be blocked for things like values
|
||||
# files in Helm charts. But symlinks which are not explicitly blocked by other checks will be allowed.
|
||||
@@ -244,10 +239,6 @@ data:
|
||||
applicationsetcontroller.enable.scm.providers: "false"
|
||||
# Number of webhook requests processed concurrently (default 50)
|
||||
applicationsetcontroller.webhook.parallelism.limit: "50"
|
||||
# Override the default requeue time for the controller. (default 3m)
|
||||
applicationsetcontroller.requeue.after: "3m"
|
||||
# Enable strict mode for tokenRef in ApplicationSet resources. When enabled, the referenced secret must have a label `argocd.argoproj.io/secret-type` with value `scm-creds`.
|
||||
applicationsetcontroller.enable.tokenref.strict.mode: "false"
|
||||
|
||||
## Argo CD Notifications Controller Properties
|
||||
# Set the logging level. One of: debug|info|warn|error (default "info")
|
||||
|
||||
@@ -12,7 +12,6 @@ stringData:
|
||||
url: https://github.com/argoproj/argocd-example-apps
|
||||
password: my-password
|
||||
username: my-username
|
||||
project: my-project
|
||||
insecure: "true" # Ignore validity of server's TLS certificate. Defaults to "false"
|
||||
forceHttpBasicAuth: "true" # Skip auth method negotiation and force usage of HTTP basic auth. Defaults to "false"
|
||||
enableLfs: "true" # Enable git-lfs for this repository. Defaults to "false"
|
||||
@@ -43,7 +42,6 @@ metadata:
|
||||
stringData:
|
||||
url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts
|
||||
name: istio.io
|
||||
project: my-project
|
||||
type: helm
|
||||
---
|
||||
apiVersion: v1
|
||||
|
||||
@@ -359,16 +359,6 @@ You can set it one of three ways:
|
||||
For option 1, the flag can be repeated multiple times. For option 2 and 3, you can specify multiple globs by separating
|
||||
them with semicolons.
|
||||
|
||||
## Application manifests generation using argocd.argoproj.io/manifest-generate-paths
|
||||
|
||||
To enhance the application manifests generation process, you can enable the use of the `argocd.argoproj.io/manifest-generate-paths` annotation. When this flag is enabled, the resources specified by this annotation will be passed to the CMP server for generating application manifests, rather than sending the entire repository. This can be particularly useful for monorepos.
|
||||
|
||||
You can set it one of three ways:
|
||||
|
||||
1. The `--plugin-use-manifest-generate-paths` argument on the repo server.
|
||||
2. The `reposerver.plugin.use.manifest.generate.paths` key if you are using `argocd-cmd-params-cm`
|
||||
3. Directly setting `ARGOCD_REPO_SERVER_PLUGIN_USE_MANIFEST_GENERATE_PATHS` environment variable on the repo server to `true`.
|
||||
|
||||
## Migrating from argocd-cm plugins
|
||||
|
||||
Installing plugins by modifying the argocd-cm ConfigMap is deprecated as of v2.4 and has been completely removed starting in v2.8.
|
||||
|
||||
@@ -30,7 +30,7 @@ For each specific kind of ConfigMap and Secret resource, there is only a single
|
||||
|------------------------------------------------------------------|-------------|--------------------------|
|
||||
| [`application.yaml`](../user-guide/application-specification.md) | Application | Example application spec |
|
||||
| [`project.yaml`](./project-specification.md) | AppProject | Example project spec |
|
||||
| [`argocd-repositories.yaml`](./argocd-repositories-yaml.md) | Secret | Repository credentials |
|
||||
| - | Secret | Repository credentials |
|
||||
|
||||
For `Application` and `AppProject` resources, the name of the resource equals the name of the application or project within Argo CD. This also means that application and project names are unique within a given Argo CD installation - you cannot have the same application name for two different applications.
|
||||
|
||||
@@ -176,7 +176,6 @@ spec:
|
||||
Repository details are stored in secrets. To configure a repo, create a secret which contains repository details.
|
||||
Consider using [bitnami-labs/sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) to store an encrypted secret definition as a Kubernetes manifest.
|
||||
Each repository must have a `url` field and, depending on whether you connect using HTTPS, SSH, or GitHub App, `username` and `password` (for HTTPS), `sshPrivateKey` (for SSH), or `githubAppPrivateKey` (for GitHub App).
|
||||
Credentials can be scoped to a project using the optional `project` field. When omitted, the credential will be used as the default for all projects without a scoped credential.
|
||||
|
||||
!!!warning
|
||||
When using [bitnami-labs/sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) the labels will be removed and have to be readded as described here: https://github.com/bitnami-labs/sealed-secrets#sealedsecrets-as-templates-for-secrets
|
||||
@@ -196,7 +195,6 @@ stringData:
|
||||
url: https://github.com/argoproj/private-repo
|
||||
password: my-password
|
||||
username: my-username
|
||||
project: my-project
|
||||
```
|
||||
|
||||
Example for SSH:
|
||||
@@ -567,8 +565,6 @@ execProviderConfig:
|
||||
}
|
||||
apiVersion: string
|
||||
installHint: string
|
||||
# Proxy URL for the kubernetes client to use when connecting to the cluster api server
|
||||
proxyUrl: string
|
||||
# Transport layer security configuration settings
|
||||
tlsClientConfig:
|
||||
# Base64 encoded PEM-encoded bytes (typically read from a client certificate file).
|
||||
@@ -583,8 +579,6 @@ tlsClientConfig:
|
||||
# certificates against. If ServerName is empty, the hostname used to contact the
|
||||
# server is used.
|
||||
serverName: string
|
||||
# Disable automatic compression for requests to the cluster
|
||||
disableCompression: boolean
|
||||
```
|
||||
|
||||
Note that if you specify a command to run under `execProviderConfig`, that command must be available in the Argo CD image. See [BYOI (Build Your Own Image)](custom_tools.md#byoi-build-your-own-image).
|
||||
@@ -791,7 +785,7 @@ the role can be appended to the `args` section like so:
|
||||
|
||||
```yaml
|
||||
...
|
||||
"args": ["aws", "--cluster-name", "my-eks-cluster", "--role-arn", "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<IAM_ROLE_NAME>"],
|
||||
"args": ["aws", "--cluster-name", "my-eks-cluster", "--roleARN", "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<IAM_ROLE_NAME>"],
|
||||
...
|
||||
```
|
||||
This construct can be used in conjunction with something like the External Secrets Operator to avoid storing the keys in
|
||||
@@ -1050,7 +1044,7 @@ stringData:
|
||||
|
||||
## Resource Exclusion/Inclusion
|
||||
|
||||
Resources can be excluded from discovery and sync so that Argo CD is unaware of them. For example, the apiGroup/kind `events.k8s.io/*`, `metrics.k8s.io/*` and `coordination.k8s.io/Lease` are always excluded. Use cases:
|
||||
Resources can be excluded from discovery and sync so that Argo CD is unaware of them. For example, the apiGroup/kind `events.k8s.io/*`, `metrics.k8s.io/*`, `coordination.k8s.io/Lease`, and `""/Endpoints` are always excluded. Use cases:
|
||||
|
||||
* You have temporal issues and you want to exclude problematic resources.
|
||||
* There are many of a kind of resources that impacts Argo CD's performance.
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
# Dynamic Cluster Distribution
|
||||
|
||||
!!! warning "Alpha Feature (Since v2.9.0)"
|
||||
This is an experimental, [alpha-quality](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md#alpha) feature.
|
||||
It may be removed in future releases or modified in backwards-incompatible ways.
|
||||
|
||||
*Current Status: [Alpha][1] (Since v2.9.0)*
|
||||
|
||||
By default, clusters are assigned to shards indefinitely. For users of the default, hash-based sharding algorithm, this
|
||||
@@ -53,3 +49,5 @@ The new sharding mechanism does not monitor the environment variable `ARGOCD_CON
|
||||
In the scenario when the number of Application Controller replicas increases, a new entry is added to the list of mappings in the `argocd-app-controller-shard-cm` ConfigMap and the cluster distribution is triggered to re-distribute the clusters.
|
||||
|
||||
In the scenario when the number of Application Controller replicas decreases, the mappings in the `argocd-app-controller-shard-cm` ConfigMap are reset and every controller acquires the shard again thus triggering the re-distribution of the clusters.
|
||||
|
||||
[1]: https://github.com/argoproj/argoproj/blob/master/community/feature-status.md
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
# Feature Maturity
|
||||
|
||||
Argo CD features may be marked with a certain [status](https://github.com/argoproj/argoproj/blob/main/community/feature-status.md)
|
||||
to indicate their stability and maturity. These are the statuses of non-stable features in Argo CD:
|
||||
|
||||
| Feature | Introduced | Status |
|
||||
|-------------------------------------|------------|--------|
|
||||
| [Structured Merge Diff Strategy][1] | v2.5.0 | Beta |
|
||||
| [AppSet Progressive Syncs][2] | v2.6.0 | Alpha |
|
||||
| [Proxy Extensions][3] | v2.7.0 | Alpha |
|
||||
| [Skip Application Reconcile][4] | v2.7.0 | Alpha |
|
||||
| [AppSets in any Namespace][5] | v2.8.0 | Beta |
|
||||
| [Dynamic Cluster Distribution][6] | v2.9.0 | Alpha |
|
||||
| [Server Side Diff][7] | v2.10.0 | Beta |
|
||||
| [Service Account Impersonation][8] | v2.13.0 | Alpha |
|
||||
|
||||
[1]: ../user-guide/diff-strategies.md#structured-merge-diff
|
||||
[2]: applicationset/Progressive-Syncs.md
|
||||
[3]: ../developer-guide/extensions/proxy-extensions.md
|
||||
[4]: ../user-guide/skip_reconcile.md
|
||||
[5]: applicationset/Appset-Any-Namespace.md
|
||||
[6]: dynamic-cluster-distribution.md
|
||||
[7]: ../user-guide/diff-strategies.md#server-side-diff
|
||||
[8]: app-sync-using-impersonation.md
|
||||
@@ -277,9 +277,6 @@ spec:
|
||||
# ...
|
||||
```
|
||||
|
||||
!!! note
|
||||
If application manifest generation using the `argocd.argoproj.io/manifest-generate-paths` annotation feature is enabled, only the resources specified by this annotation will be sent to the CMP server for manifest generation, rather than the entire repository. To determine the appropriate resources, a common root path is calculated based on the paths provided in the annotation. The application path serves as the deepest path that can be selected as the root.
|
||||
|
||||
### Application Sync Timeout & Jitter
|
||||
|
||||
Argo CD has a timeout for application syncs. It will trigger a refresh for each application periodically when the timeout expires.
|
||||
|
||||
@@ -471,7 +471,7 @@ Once we create this service, we can configure the Ingress to conditionally route
|
||||
```
|
||||
|
||||
## [Istio](https://www.istio.io)
|
||||
You can put Argo CD behind Istio using following configurations. Here we will achieve both serving Argo CD behind istio and using subpath on Istio
|
||||
You can put Argo CD behind Istio using following configurations. Here we will achive both serving Argo CD behind istio and using subpath on Istio
|
||||
|
||||
First we need to make sure that we can run Argo CD with subpath (ie /argocd). For this we have used install.yaml from argocd project as is
|
||||
|
||||
|
||||
@@ -57,8 +57,8 @@ slack:
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}",
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
@@ -86,8 +86,8 @@ teams:
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
},
|
||||
{
|
||||
"name": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
},
|
||||
{
|
||||
"name": "Revision",
|
||||
@@ -115,7 +115,7 @@ teams:
|
||||
"name":"Open Repository",
|
||||
"targets":[{
|
||||
"os":"default",
|
||||
"uri":{{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},}}" {{- end }},
|
||||
"uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}"
|
||||
}]
|
||||
}]
|
||||
themeColor: '#000080'
|
||||
@@ -143,8 +143,8 @@ slack:
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}",
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
@@ -167,8 +167,8 @@ teams:
|
||||
"value": "{{.app.status.health.status}}"
|
||||
},
|
||||
{
|
||||
"name": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
@@ -192,7 +192,7 @@ teams:
|
||||
"name":"Open Repository",
|
||||
"targets":[{
|
||||
"os":"default",
|
||||
"uri":{{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},}}" {{- end }},
|
||||
"uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}"
|
||||
}]
|
||||
}]
|
||||
themeColor: '#FF0000'
|
||||
@@ -220,8 +220,8 @@ slack:
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}",
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
@@ -248,8 +248,8 @@ teams:
|
||||
"value": "{{.app.status.operationState.finishedAt}}"
|
||||
},
|
||||
{
|
||||
"name": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
@@ -273,7 +273,7 @@ teams:
|
||||
"name":"Open Repository",
|
||||
"targets":[{
|
||||
"os":"default",
|
||||
"uri":{{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},}}" {{- end }},
|
||||
"uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}"
|
||||
}]
|
||||
}]
|
||||
themeColor: '#FF0000'
|
||||
@@ -301,8 +301,8 @@ slack:
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}",
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
@@ -329,8 +329,8 @@ teams:
|
||||
"value": "{{.app.status.operationState.startedAt}}"
|
||||
},
|
||||
{
|
||||
"name": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
@@ -354,7 +354,7 @@ teams:
|
||||
"name":"Open Repository",
|
||||
"targets":[{
|
||||
"os":"default",
|
||||
"uri":{{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},}}" {{- end }},
|
||||
"uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}"
|
||||
}]
|
||||
}]
|
||||
title: Start syncing application {{.app.metadata.name}}.
|
||||
@@ -386,8 +386,8 @@ slack:
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}",
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
@@ -410,8 +410,8 @@ teams:
|
||||
"value": "{{.app.status.sync.status}}"
|
||||
},
|
||||
{
|
||||
"name": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
@@ -435,7 +435,7 @@ teams:
|
||||
"name":"Open Repository",
|
||||
"targets":[{
|
||||
"os":"default",
|
||||
"uri":{{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},}}" {{- end }},
|
||||
"uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}"
|
||||
}]
|
||||
}]
|
||||
title: Application {{.app.metadata.name}} sync status is 'Unknown'
|
||||
@@ -462,8 +462,8 @@ slack:
|
||||
"short": true
|
||||
},
|
||||
{
|
||||
"title": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}",
|
||||
"short": true
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
@@ -490,8 +490,8 @@ teams:
|
||||
"value": "{{.app.status.operationState.finishedAt}}"
|
||||
},
|
||||
{
|
||||
"name": {{- if .app.spec.source }} "Repository" {{- else if .app.spec.sources }} "Repositories" {{- end }},
|
||||
"value": {{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},
|
||||
"name": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}"
|
||||
}
|
||||
{{range $index, $c := .app.status.conditions}}
|
||||
,
|
||||
@@ -515,7 +515,7 @@ teams:
|
||||
"name":"Open Repository",
|
||||
"targets":[{
|
||||
"os":"default",
|
||||
"uri":{{- if .app.spec.source }} ":arrow_heading_up: {{ .app.spec.source.repoURL }}" {{- else if .app.spec.sources }} "{{- range $index, $source := .app.spec.sources }}{{ if $index }}\n{{ end }}:arrow_heading_up: {{ $source.repoURL }}{{- end }}" {{- end }},}}" {{- end }},
|
||||
"uri":"{{.app.spec.source.repoURL | call .repo.RepoURLToHTTPS}}"
|
||||
}]
|
||||
}]
|
||||
themeColor: '#000080'
|
||||
|
||||
@@ -2,34 +2,28 @@
|
||||
|
||||
To be able to send notifications with argocd-notifications you have to create an [API Integration](https://docs.opsgenie.com/docs/integrations-overview) inside your [Opsgenie Team](https://docs.opsgenie.com/docs/teams).
|
||||
|
||||
1. Login to Opsgenie at https://app.opsgenie.com or https://app.eu.opsgenie.com (if you have an account in the European Union).
|
||||
2. Make sure you already have a team; if not, follow this guide: https://docs.opsgenie.com/docs/teams.
|
||||
3. Click "Teams" in the Menu on the left.
|
||||
4. Select the team that you want to notify.
|
||||
5. In the team's configuration menu, select "Integrations".
|
||||
6. Click "Add Integration" in the top right corner.
|
||||
7. Select "API" integration.
|
||||
8. Give your integration a name, copy the "API key", and save it somewhere for later.
|
||||
9. Click "Edit" in the integration settings.
|
||||
10. Make sure the checkbox for "Create and Update Access" is selected; disable the other checkboxes to remove unnecessary permissions.
|
||||
11. Click "Save" at the bottom.
|
||||
12. Click "Turn on integration" in the top right corner.
|
||||
13. Check your browser for the correct server apiURL. If it is "app.opsgenie.com", then use the US/international API URL `api.opsgenie.com`; otherwise, use `api.eu.opsgenie.com` (European API).
|
||||
14. You are finished with configuring Opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name, and the apiKey to configure the Opsgenie integration in the `argocd-notifications-secret` secret.
|
||||
15. You can find the example `argocd-notifications-cm` configuration below.
|
||||
1. Login to Opsgenie at https://app.opsgenie.com or https://app.eu.opsgenie.com (if you have an account in the european union)
|
||||
2. Make sure you already have a team, if not follow this guide https://docs.opsgenie.com/docs/teams
|
||||
3. Click "Teams" in the Menu on the left
|
||||
4. Select the team that you want to notify
|
||||
5. In the teams configuration menu select "Integrations"
|
||||
6. Click "Add Integration" in the top right corner
|
||||
7. Select "API" integration
|
||||
8. Give your integration a name, copy the "API key" and safe it somewhere for later
|
||||
9. Click "Edit" in the integration settings
|
||||
10. Make sure the checkbox for "Create and Update Access" is selected, disable the other checkboxes to remove unnecessary permissions
|
||||
11. Click "Save" at the bottom
|
||||
12. Click "Turn on integration" in the top right corner
|
||||
13. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the US/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (European API).
|
||||
14. You are finished with configuring Opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the Opsgenie integration in the `argocd-notifications-secret` secret.
|
||||
15. You can find the example `argocd-notifications-cm` configuration at the below.
|
||||
|
||||
| **Option** | **Required** | **Type** | **Description** | **Example** |
|
||||
| ------------- | ------------ | -------- | -------------------------------------------------------------------------------------------------------- | -------------------------------- |
|
||||
| `description` | True | `string` | Description field of the alert that is generally used to provide detailed information about the alert. | `Hello from Argo CD!` |
|
||||
| `priority` | False | `string` | Priority level of the alert. Possible values are P1, P2, P3, P4, and P5. Default value is P3. | `P1` |
|
||||
| `description` | True | `string` | Description field of the alert that is generally used to provide a detailed information about the alert. | `Hello from Argo CD!` |
|
||||
| `priority` | False | `string` | Priority level of the alert. Possible values are P1, P2, P3, P4 and P5. Default value is P3. | `P1` |
|
||||
| `alias` | False | `string` | Client-defined identifier of the alert, that is also the key element of Alert De-Duplication. | `Life is too short for no alias` |
|
||||
| `note` | False | `string` | Additional note that will be added while creating the alert. | `Error from Argo CD!` |
|
||||
| `actions` | False | `[]string` | Custom actions that will be available for the alert. | `["Resolve", "Escalate"]` |
|
||||
| `tags` | False | `[]string` | Tags of the alert. | `["critical", "deployment"]` |
|
||||
| `visibleTo` | False | `[]alert.Responder` | Teams and users that the alert will become visible to without sending any notification. The `type` field is mandatory for each item, where possible values are `team` and `user`. In addition to the `type` field, either `id` or `name` should be provided for teams, and either `id` or `username` should be given for users. Please note that alerts will be visible to the teams specified within the `responders` field by default, so there is no need to re-specify them in the `visibleTo` field. | `[{Type: "team", Id: "team_id"}, {Type: "user", Id: "user_id"}]` |
|
||||
| `details` | False | `map[string]string` | Map of key-value pairs to use as custom properties of the alert. | `{"environment": "production", "service": "web"}` |
|
||||
| `entity` | False | `string` | Entity field of the alert that is generally used to specify which domain the alert is related to. | `web-server` |
|
||||
| `user` | False | `string` | Display name of the request owner. | `admin_user` |
|
||||
| `note` | False | `string` | Additional note that will be added while creating the alert. | `Error from Argo CD!` |
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -53,26 +47,6 @@ data:
|
||||
priority: P1
|
||||
alias: {{.app.metadata.name}}
|
||||
note: Error from Argo CD!
|
||||
actions:
|
||||
- Restart
|
||||
- AnExampleAction
|
||||
tags:
|
||||
- OverwriteQuietHours
|
||||
- Critical
|
||||
visibleTo:
|
||||
- Id: "{{.app.metadata.responderId}}"
|
||||
Type: "team"
|
||||
- Name: "rocket_team"
|
||||
Type: "team"
|
||||
- Id: "{{.app.metadata.responderUserId}}"
|
||||
Type: "user"
|
||||
- Username: "trinity@opsgenie.com"
|
||||
Type: "user"
|
||||
details:
|
||||
environment: production
|
||||
service: web
|
||||
entity: Argo CD Application
|
||||
user: John Doe
|
||||
trigger.on-a-problem: |
|
||||
- description: Application has a problem.
|
||||
send:
|
||||
@@ -80,11 +54,11 @@ data:
|
||||
when: app.status.health.status == 'Degraded' or app.status.operationState.phase in ['Error', 'Failed'] or app.status.sync.status == 'Unknown'
|
||||
```
|
||||
|
||||
16. Add annotation in the application YAML file to enable notifications for a specific Argo CD app.
|
||||
16. Add annotation in application yaml file to enable notifications for specific Argo CD app.
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-a-problem.opsgenie: <your-team>
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
annotations:
|
||||
notifications.argoproj.io/subscribe.on-a-problem.opsgenie: <your-team>
|
||||
```
|
||||
@@ -21,11 +21,11 @@ The Slack notification service configuration includes following settings:
|
||||
|
||||
1. Create Slack Application using https://api.slack.com/apps?new_app=1
|
||||

|
||||
1. Once application is created navigate to `OAuth & Permissions`
|
||||
1. Once application is created navigate to `Enter OAuth & Permissions`
|
||||

|
||||
1. Go to `Scopes` > `Bot Token Scopes` > `Add an OAuth Scope`. Add `chat:write` scope. To use the optional username and icon overrides in the Slack notification service also add the `chat:write.customize` scope.
|
||||
1. Click `Permissions` under `Add features and functionality` section and add `chat:write` scope. To use the optional username and icon overrides in the Slack notification service also add the `chat:write.customize` scope.
|
||||

|
||||
1. `OAuth & Permission` > `OAuth Tokens for Your Workspace` > `Install to Workspace`
|
||||
1. Scroll back to the top, click 'Install App to Workspace' button and confirm the installation.
|
||||

|
||||
1. Once installation is completed copy the OAuth token.
|
||||

|
||||
@@ -117,35 +117,6 @@ template.app-sync-status: |
|
||||
}]
|
||||
```
|
||||
|
||||
If you want to specify an icon and username for each message, you can specify values for `username` and `icon` in the `slack` field.
For the icon you can specify an emoji or an image URL, just like in the service definition.
If you set `username` and `icon` in the template, the values set in the template will be used even if values are specified in the service definition.
|
||||
|
||||
```yaml
|
||||
template.app-sync-status: |
|
||||
message: |
|
||||
Application {{.app.metadata.name}} sync is {{.app.status.sync.status}}.
|
||||
Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}.
|
||||
slack:
|
||||
username: "testbot"
|
||||
icon: https://example.com/image.png
|
||||
attachments: |
|
||||
[{
|
||||
"title": "{{.app.metadata.name}}",
|
||||
"title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
|
||||
"color": "#18be52",
|
||||
"fields": [{
|
||||
"title": "Sync Status",
|
||||
"value": "{{.app.status.sync.status}}",
|
||||
"short": true
|
||||
}, {
|
||||
"title": "Repository",
|
||||
"value": "{{.app.spec.source.repoURL}}",
|
||||
"short": true
|
||||
}]
|
||||
}]
|
||||
```
|
||||
|
||||
The messages can be aggregated to the slack threads by grouping key which can be specified in a `groupingKey` string field under `slack` field.
|
||||
`groupingKey` is used across each template and works independently on each slack channel.
|
||||
When multiple applications will be updated at the same time or frequently, the messages in slack channel can be easily read by aggregating with git commit hash, application name, etc.
|
||||
|
||||
@@ -27,66 +27,24 @@ metadata:
|
||||
data:
|
||||
service.slack: |
|
||||
token: $slack-token
|
||||
icon: ":rocket:" # <- diff here
|
||||
icon: ":rocket:"
|
||||
```
|
||||
|
||||
### service type 'xxxx' is not supported
|
||||
|
||||
Check the `argocd-notifications` controller version. For example, the Teams integration support started in `v1.1.0`.
|
||||
You need to check your argocd-notifications controller version. For instance, the teams integration is to support `v1.1.0` and more.
|
||||
|
||||
## Failed to notify recipient
|
||||
|
||||
### notification service 'xxxx' is not supported
|
||||
|
||||
You have not defined `xxxx` in `argocd-notifications-cm` or parsing failed.
|
||||
|
||||
### GitHub.repoURL (\u003cno value\u003e) does not have a / using the configuration
|
||||
|
||||
Likely caused by an Application with [multiple sources](https://argo-cd.readthedocs.io/en/stable/user-guide/multiple_sources/):
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
sources: # <- multiple sources
|
||||
- repoURL: https://github.com/exampleOrg/first.git
|
||||
path: sources/example
|
||||
- repoURL: https://github.com/exampleOrg/second.git
|
||||
targetRevision: "{{branch}}"
|
||||
```
|
||||
|
||||
The standard notification template only supports a single source (`{{.app.spec.source.repoURL}}`). Use an index to specify the source in the array:
|
||||
|
||||
```yaml
|
||||
template.example: |
|
||||
github:
|
||||
repoURLPath: "{{ (index .app.spec.sources 0).repoURL }}"
|
||||
```
|
||||
|
||||
### Error message `POST https://api.github.com/repos/xxxx/yyyy/statuses/: 404 Not Found`
|
||||
|
||||
This case is similar to the previous one: you have multiple sources in the Application manifest.
|
||||
The default `revisionPath` template `{{.app.status.operationState.syncResult.revision}}` is for an Application with a single source.
|
||||
|
||||
Multi-source applications report application statuses in an array:
|
||||
|
||||
```yaml
|
||||
status:
|
||||
operationState:
|
||||
syncResult:
|
||||
revisions:
|
||||
- 38cfa22edf9148caabfecb288bfb47dc4352dfc6
|
||||
- 38cfa22edf9148caabfecb288bfb47dc4352dfc6
|
||||
```

A quick fix for this is to use the `index` function to get the first revision:
|
||||
```yaml
|
||||
template.example: |
|
||||
github:
|
||||
revisionPath: "{{index .app.status.operationState.syncResult.revisions 0}}"
|
||||
```
|
||||
You have not defined `xxxx` in `argocd-notifications-cm` or to fail to parse settings.
|
||||
|
||||
## config referenced xxx, but key does not exist in secret
|
||||
|
||||
- If you are using a custom secret, check that the secret is in the same namespace
|
||||
- You have added the label: `app.kubernetes.io/part-of: argocd` to the secret
|
||||
- You have tried restarting `argocd-notifications` controller
|
||||
- You have tried restarting argocd-notifications controller
|
||||
|
||||
### Example:
|
||||
Secret:
|
||||
|
||||
@@ -65,7 +65,7 @@ configuration.
|
||||
**Example**
|
||||
```bash
|
||||
kubectl exec -it argocd-notifications-controller-<pod-hash> \
|
||||
/usr/local/bin/argocd admin notifications trigger get
|
||||
/app/argocd admin notifications trigger get
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
@@ -11,21 +11,12 @@ When a resource update is ignored, if the resource's [health status](./health.md
|
||||
|
||||
## System-Level Configuration
|
||||
|
||||
By default, `resource.ignoreResourceUpdatesEnabled` is set to `true`, enabling Argo CD to ignore resource updates. This default setting ensures that Argo CD maintains sustainable performance by reducing unnecessary reconcile operations. If you need to alter this behavior, you can explicitly set `resource.ignoreResourceUpdatesEnabled` to `false` in the `argocd-cm` ConfigMap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
namespace: argocd
|
||||
data:
|
||||
resource.ignoreResourceUpdatesEnabled: "false"
|
||||
```
|
||||
|
||||
Argo CD allows ignoring resource updates at a specific JSON path, using [RFC6902 JSON patches](https://tools.ietf.org/html/rfc6902) and [JQ path expressions](https://stedolan.github.io/jq/manual/#path(path_expression)). It can be configured for a specified group and kind
|
||||
in `resource.customizations` key of the `argocd-cm` ConfigMap.
|
||||
|
||||
!!!important "Enabling the feature"
|
||||
The feature is behind a flag. To enable it, set `resource.ignoreResourceUpdatesEnabled` to `"true"` in the `argocd-cm` ConfigMap.
|
||||
|
||||
Following is an example of a customization which ignores the `refreshTime` status field of an [`ExternalSecret`](https://external-secrets.io/main/api/externalsecret/) resource:
|
||||
|
||||
```yaml
|
||||
@@ -119,7 +110,7 @@ data:
|
||||
jqPathExpressions:
|
||||
# Ignore lastTransitionTime for conditions; helpful when SharedResourceWarnings are being regularly updated but not
|
||||
# actually changing in content.
|
||||
- .status?.conditions[]?.lastTransitionTime
|
||||
- .status.conditions[].lastTransitionTime
|
||||
```
|
||||
|
||||
## Ignoring updates for untracked resources
|
||||
|
||||
10
docs/operator-manual/resource_actions_builtin.md
generated
10
docs/operator-manual/resource_actions_builtin.md
generated
@@ -33,16 +33,6 @@
|
||||
- [notification.toolkit.fluxcd.io/Receiver/reconcile](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/notification.toolkit.fluxcd.io/Receiver/actions/reconcile/action.lua)
|
||||
- [notification.toolkit.fluxcd.io/Receiver/resume](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/notification.toolkit.fluxcd.io/Receiver/actions/resume/action.lua)
|
||||
- [notification.toolkit.fluxcd.io/Receiver/suspend](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/notification.toolkit.fluxcd.io/Receiver/actions/suspend/action.lua)
|
||||
- [numaflow.numaproj.io/MonoVertex/pause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaflow.numaproj.io/MonoVertex/actions/pause/action.lua)
|
||||
- [numaflow.numaproj.io/MonoVertex/unpause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaflow.numaproj.io/MonoVertex/actions/unpause/action.lua)
|
||||
- [numaflow.numaproj.io/Pipeline/pause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaflow.numaproj.io/Pipeline/actions/pause/action.lua)
|
||||
- [numaflow.numaproj.io/Pipeline/unpause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaflow.numaproj.io/Pipeline/actions/unpause/action.lua)
|
||||
- [numaplane.numaproj.io/MonoVertexRollout/pause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/MonoVertexRollout/actions/pause/action.lua)
|
||||
- [numaplane.numaproj.io/MonoVertexRollout/unpause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/MonoVertexRollout/actions/unpause/action.lua)
|
||||
- [numaplane.numaproj.io/PipelineRollout/allow-data-loss](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/PipelineRollout/actions/allow-data-loss/action.lua)
|
||||
- [numaplane.numaproj.io/PipelineRollout/disallow-data-loss](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/PipelineRollout/actions/disallow-data-loss/action.lua)
|
||||
- [numaplane.numaproj.io/PipelineRollout/pause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/PipelineRollout/actions/pause/action.lua)
|
||||
- [numaplane.numaproj.io/PipelineRollout/unpause](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/numaplane.numaproj.io/PipelineRollout/actions/unpause/action.lua)
|
||||
- [source.toolkit.fluxcd.io/Bucket/reconcile](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/source.toolkit.fluxcd.io/Bucket/actions/reconcile/action.lua)
|
||||
- [source.toolkit.fluxcd.io/Bucket/resume](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/source.toolkit.fluxcd.io/Bucket/actions/resume/action.lua)
|
||||
- [source.toolkit.fluxcd.io/Bucket/suspend](https://github.com/argoproj/argo-cd/blob/master/resource_customizations/source.toolkit.fluxcd.io/Bucket/actions/suspend/action.lua)
|
||||
|
||||
@@ -31,7 +31,6 @@ argocd-application-controller [flags]
|
||||
--default-cache-expiration duration Cache expiration default (default 24h0m0s)
|
||||
--disable-compression If true, opt-out of response compression for all requests to the server
|
||||
--dynamic-cluster-distribution-enabled Enables dynamic cluster distribution.
|
||||
--enable-k8s-event none Enable ArgoCD to use k8s event. For disabling all events, set the value as none. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated) (default [all])
|
||||
--gloglevel int Set the glog logging level
|
||||
-h, --help help for argocd-application-controller
|
||||
--ignore-normalizer-jq-execution-timeout-seconds duration Set ignore normalizer JQ execution timeout
|
||||
@@ -67,10 +66,7 @@ argocd-application-controller [flags]
|
||||
--repo-server-strict-tls Whether to use strict validation of the TLS cert presented by the repo server
|
||||
--repo-server-timeout-seconds int Repo server RPC call timeout seconds. (default 60)
|
||||
--request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
|
||||
--self-heal-backoff-cap-seconds int Specifies max timeout of exponential backoff between application self heal attempts (default 300)
|
||||
--self-heal-backoff-factor int Specifies factor of exponential timeout between application self heal attempts (default 3)
|
||||
--self-heal-backoff-timeout-seconds int Specifies initial timeout of exponential backoff between self heal attempts (default 2)
|
||||
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts
|
||||
--self-heal-timeout-seconds int Specifies timeout between application self heal attempts (default 5)
|
||||
--sentinel stringArray Redis sentinel hostname and port (e.g. argocd-redis-ha-announce-0:6379).
|
||||
--sentinelmaster string Redis sentinel master group name. (default "master")
|
||||
--server string The address and port of the Kubernetes API server
|
||||
|
||||
@@ -35,7 +35,6 @@ argocd-repo-server [flags]
|
||||
--otlp-insecure OpenTelemetry collector insecure mode (default true)
|
||||
--parallelismlimit int Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit.
|
||||
--plugin-tar-exclude stringArray Globs to filter when sending tarballs to plugins.
|
||||
--plugin-use-manifest-generate-paths Pass the resources described in argocd.argoproj.io/manifest-generate-paths value to the cmpserver to generate the application manifests.
|
||||
--port int Listen on given port for incoming connections (default 8081)
|
||||
--redis string Redis server hostname and port (e.g. argocd-redis:6379).
|
||||
--redis-ca-certificate string Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.
|
||||
|
||||
@@ -51,7 +51,6 @@ argocd-server [flags]
|
||||
--disable-auth Disable client authentication
|
||||
--disable-compression If true, opt-out of response compression for all requests to the server
|
||||
--enable-gzip Enable GZIP compression (default true)
|
||||
--enable-k8s-event none Enable ArgoCD to use k8s event. For disabling all events, set the value as none. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated) (default [all])
|
||||
--enable-proxy-extension Enable Proxy Extension feature
|
||||
--gloglevel int Set the glog logging level
|
||||
-h, --help help for argocd-server
|
||||
|
||||
@@ -1,2 +1,5 @@
|
||||
This page is populated for released Argo CD versions. Use the version selector to view this table for a specific
|
||||
version.
|
||||
| Argo CD version | Kubernetes versions |
|
||||
|-----------------|---------------------|
|
||||
| 2.13 | |
|
||||
| 2.12 | |
|
||||
| 2.11 | v1.29, v1.28, v1.27, v1.26, v1.25 |
|
||||
|
||||
@@ -70,4 +70,4 @@ The default extension for log files generated by Argo CD when using the "Downloa
|
||||
If you have any custom scripts or tools that depend on the `.txt` extension, please update them accordingly.
|
||||
## Added proxy to kustomize
|
||||
|
||||
Proxy config set on repository credentials / repository templates is now passed down to the `kustomize build` command.
|
||||
Proxy config set on repository credentials / repository templates is now passed down to the `kustomie build` command.
|
||||
|
||||
@@ -135,7 +135,7 @@ First, create the OIDC integration:
|
||||

|
||||
1. Update the following:
|
||||
1. `App Integration name` and `Logo` - set these to suit your needs; they'll be displayed in the Okta catalogue.
|
||||
1. `Sign-in redirect URLs`: Add `https://argocd.example.com/auth/callback`; replacing `argocd.example.com` with your ArgoCD web interface URL.
|
||||
1. `Sign-in redirect URLs`: Add `https://argocd.example.com/auth/callback`; replacing `argocd.example.com` with your ArgoCD web interface URL. Also add `http://localhost:8085/auth/callback` if you would like to be able to login with the CLI.
|
||||
1. `Sign-out redirect URIs`: Add `https://argocd.example.com`; substituting the correct domain name as above.
|
||||
1. Either assign groups, or choose to skip this step for now.
|
||||
1. Leave the rest of the options as-is, and save the integration.
|
||||
@@ -170,25 +170,6 @@ Next, create a custom Authorization server:
|
||||

|
||||
1. Finally, click `Back to Authorization Servers`, and copy the `Issuer URI`. You will need this later.
|
||||
|
||||
### CLI login
|
||||
|
||||
In order to login with the CLI `argocd login https://argocd.example.com --sso`, Okta requires a separate dedicated App Integration:
|
||||
|
||||
1. Create a new `Create App Integration`, and choose `OIDC`, and then `Single-Page Application`.
|
||||
1. Update the following:
|
||||
1. `App Integration name` and `Logo` - set these to suit your needs; they'll be displayed in the Okta catalogue.
|
||||
1. `Sign-in redirect URLs`: Add `http://localhost:8085/auth/callback`.
|
||||
1. `Sign-out redirect URIs`: Add `http://localhost:8085`.
|
||||
1. Either assign groups, or choose to skip this step for now.
|
||||
1. Leave the rest of the options as-is, and save the integration.
|
||||
1. Copy the `Client ID` from the newly created app; `cliClientID: <Client ID>` will be used in your `argocd-cm` ConfigMap.
|
||||
1. Edit your Authorization Server `Access Policies`:
|
||||
1. Navigate to the Okta API Management at `Security > API`.
|
||||
1. Choose your existing `Authorization Server` that was created previously.
|
||||
1. Click `Access Policies` > `Edit Policy`.
|
||||
1. Assign your newly created `App Integration` by filling in the text box and clicking `Update Policy`.
|
||||

|
||||
|
||||
If you haven't yet created Okta groups, and assigned them to the application integration, you should do that now:
|
||||
|
||||
1. Go to `Directory > Groups`
|
||||
@@ -209,7 +190,6 @@ oidc.config: |
|
||||
# this is the authorization server URI
|
||||
issuer: https://example.okta.com/oauth2/aus9abcdefgABCDEFGd7
|
||||
clientID: 0oa9abcdefgh123AB5d7
|
||||
cliClientID: gfedcba0987654321GEFDCBA # Optional if using the CLI for SSO
|
||||
clientSecret: ABCDEFG1234567890abcdefg
|
||||
requestedScopes: ["openid", "profile", "email", "groups"]
|
||||
requestedIDTokenClaims: {"groups": {"essential": true}}
|
||||
|
||||
@@ -1,180 +0,0 @@
|
||||
---
|
||||
title: Server Side Pagination for Applications List and Watch APIs
|
||||
authors:
|
||||
- "@alexmt"
|
||||
sponsors:
|
||||
- "@jessesuen"
|
||||
reviewers:
|
||||
- TBD
|
||||
approvers:
|
||||
- TBD
|
||||
|
||||
creation-date: 2024-02-14
|
||||
last-updated: 2024-02-14
|
||||
---
|
||||
|
||||
# Introduce Server Side Pagination for Applications List and Watch APIs
|
||||
|
||||
Improve Argo CD performance by introducing server side pagination for Applications List and Watch APIs.
|
||||
|
||||
## Open Questions [optional]
|
||||
|
||||
This is where to call out areas of the design that require closure before deciding to implement the
|
||||
design.
|
||||
|
||||
|
||||
## Summary
|
||||
|
||||
The Argo CD API server currently returns all applications in a single response. This can be a performance
|
||||
bottleneck when there are a large number of applications. This proposal is to introduce server side pagination
|
||||
for the Applications List and Watch APIs.
|
||||
|
||||
## Motivation
|
||||
|
||||
The main motivation for this proposal is to improve the Argo CD UI responsiveness when there are a large number
of applications. The API server memory usage increases with the number of applications, however this is not critical
and can be mitigated by increasing memory limits for the API server deployment. The UI, however, becomes unresponsive
even on a powerful machine when the number of applications exceeds 2000. Server side pagination will reduce the
amount of data returned by the API server and improve the UI responsiveness.
|
||||
|
||||
### Goals
|
||||
|
||||
* Support server side pagination for Applications List and Watch APIs
|
||||
|
||||
* Leverage pagination in the Argo CD UI to improve responsiveness
|
||||
|
||||
* Leverage pagination in the Argo CD CLI to improve performance and reduce load on the API server
|
||||
|
||||
### Non-Goals
|
||||
|
||||
* The API Server is known to use a lot of CPU while applying very large RBAC policies to a large number of applications.
  Even with pagination, the API still needs to apply RBAC policies to return the "last page" response, so this issue is not addressed by this proposal.
|
||||
|
||||
## Proposal
|
||||
|
||||
**Pagination Cursor**
|
||||
|
||||
It is proposed to add `offset` and `limit` fields for pagination support in the Applications List API.
|
||||
The Watch API is a bit more complex. Both the Argo CD user interface and the CLI rely on the Watch API to display real-time updates of Argo CD applications.
|
||||
The Watch API currently supports filtering by a project and an application name. In order to effectively
|
||||
implement server side pagination for the Watch API we cannot rely on the order of the applications returned by the API server. Instead of
|
||||
relying on the order it is proposed to rely on the application name and use it as a cursor for pagination. Both the Applications List and Watch
|
||||
APIs will be extended with the optional `minName` and `maxName` fields. The `minName` field will be used to specify the application name to start from
|
||||
and the `maxName` field will be used to specify the application name to end at. The fields should be added to the `ApplicationQuery` message
|
||||
which is used as a request payload for the Applications List and Watch APIs.
|
||||
|
||||
```proto
|
||||
message ApplicationQuery {
|
||||
// ... existing fields
|
||||
// New proto fields for server side pagination
|
||||
// the application name to start from (app with min name is included in response)
|
||||
optional string minName = 9;
|
||||
// the application name to end at (app with max name is included in response)
|
||||
optional string maxName = 10;
|
||||
// offset
|
||||
optional int64 offset = 18;
|
||||
// limit
|
||||
optional int64 limit = 19;
|
||||
}
|
||||
```
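To make the name-cursor scheme concrete, here is a minimal Go sketch of how a client could walk the full application list with an inclusive `minName` cursor. The `listFunc` type and the in-memory "server" are illustrative stand-ins for the real List API, not actual Argo CD code.

```go
package main

import (
	"fmt"
	"sort"
)

// listFunc is a hypothetical stand-in for one Applications List request that
// honours the proposed minName/limit fields and returns names in sorted order.
type listFunc func(minName string, limit int) []string

// pageAll walks the whole list using the application name as the cursor.
// Because minName is inclusive, the overlapping entry is dropped on every
// page after the first.
func pageAll(list listFunc, limit int) []string {
	var all []string
	cursor := ""
	for {
		names := list(cursor, limit)
		if cursor != "" && len(names) > 0 && names[0] == cursor {
			names = names[1:]
		}
		if len(names) == 0 {
			return all
		}
		all = append(all, names...)
		cursor = names[len(names)-1]
	}
}

func main() {
	apps := []string{"guestbook", "billing", "search", "auth", "frontend"}
	sort.Strings(apps)

	// Fake server: returns up to `limit` names starting at minName (inclusive).
	list := func(minName string, limit int) []string {
		i := sort.SearchStrings(apps, minName)
		end := i + limit
		if end > len(apps) {
			end = len(apps)
		}
		return apps[i:end]
	}

	fmt.Println(pageAll(list, 2)) // [auth billing frontend guestbook search]
}
```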
|
||||
|
||||
**Server Side Filtering**
|
||||
|
||||
In order to support server side pagination the filtering has to be moved to the server side as well. `ApplicationQuery` message needs to be extended with the following fields:
|
||||
|
||||
```proto
|
||||
message ApplicationQuery {
|
||||
// ... existing fields
|
||||
// New proto fields for server side filtering
|
||||
// the repos filter
|
||||
repeated string repos = 11;
|
||||
// the clusters filter
|
||||
repeated string clusters = 12;
|
||||
// the namespaces filter
|
||||
repeated string namespaces = 13;
|
||||
// the auto sync filter
|
||||
optional bool autoSyncEnabled = 14;
|
||||
// the sync status filter
|
||||
repeated string syncStatuses = 15;
|
||||
// the health status filter
|
||||
repeated string healthStatuses = 16;
|
||||
// search
|
||||
optional string search = 17;
|
||||
}
|
||||
```
|
||||
|
||||
The Argo CD UI should be updated to populate fields in the List and Watch API requests instead of performing filtering on the client side.
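As a rough illustration of what server side filtering could look like, the Go sketch below applies repo, sync status, and search filters in the spirit of the proposed `ApplicationQuery` fields. The `appSummary` and `filter` types are simplified stand-ins, not the real API types.

```go
package main

import (
	"fmt"
	"strings"
)

// appSummary is a tiny illustrative stand-in for an Application as seen by the
// list handler; the real type is the full Application resource.
type appSummary struct {
	Name       string
	RepoURL    string
	SyncStatus string
}

// filter mirrors the proposed ApplicationQuery filter fields in spirit:
// empty slices/strings mean "no filter", matching how the UI filters behave today.
type filter struct {
	Repos        []string
	SyncStatuses []string
	Search       string
}

func (f filter) matches(a appSummary) bool {
	if len(f.Repos) > 0 && !contains(f.Repos, a.RepoURL) {
		return false
	}
	if len(f.SyncStatuses) > 0 && !contains(f.SyncStatuses, a.SyncStatus) {
		return false
	}
	return f.Search == "" || strings.Contains(a.Name, f.Search)
}

func contains(xs []string, x string) bool {
	for _, v := range xs {
		if v == x {
			return true
		}
	}
	return false
}

func main() {
	apps := []appSummary{
		{"guestbook", "https://github.com/example/apps.git", "Synced"},
		{"billing", "https://github.com/example/billing.git", "OutOfSync"},
	}
	f := filter{SyncStatuses: []string{"OutOfSync"}}
	var out []appSummary
	for _, a := range apps {
		if f.matches(a) {
			out = append(out, a)
		}
	}
	fmt.Println(out) // [{billing https://github.com/example/billing.git OutOfSync}]
}
```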
|
||||
|
||||
**Applications Stats**
|
||||
|
||||
The Argo CD UI displays the breakdown of the applications by the sync status, health status etc. Stats numbers are calculated on the client side
|
||||
and rely on the full list of applications returned by the API server. The server side pagination will break the stats calculation. The proposal is to
|
||||
introduce a new `stats` field in the Applications List API response. The field will contain the breakdown of the applications by various statuses.
|
||||
|
||||
```golang
|
||||
type ApplicationLabelStats struct {
|
||||
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
|
||||
Values []string `json:"values" protobuf:"bytes,2,opt,name=values"`
|
||||
}
|
||||
|
||||
// ApplicationListStats holds additional information about the list of applications
|
||||
type ApplicationListStats struct {
|
||||
Total int64 `json:"total" protobuf:"bytes,1,opt,name=total"`
|
||||
TotalBySyncStatus map[SyncStatusCode]int64 `json:"totalBySyncStatus,omitempty" protobuf:"bytes,2,opt,name=totalBySyncStatus"`
|
||||
TotalByHealthStatus map[health.HealthStatusCode]int64 `json:"totalByHealthStatus,omitempty" protobuf:"bytes,3,opt,name=totalByHealthStatus"`
|
||||
AutoSyncEnabledCount int64 `json:"autoSyncEnabledCount" protobuf:"bytes,4,opt,name=autoSyncEnabledCount"`
|
||||
Destinations []ApplicationDestination `json:"destinations" protobuf:"bytes,5,opt,name=destinations"`
|
||||
Namespaces []string `json:"namespaces" protobuf:"bytes,6,opt,name=namespaces"`
|
||||
Labels []ApplicationLabelStats `json:"labels,omitempty" protobuf:"bytes,7,opt,name=labels"`
|
||||
}
|
||||
```
|
||||
|
||||
The `stats` field should be populated with information about all applications matched by the query, even when only a single page is returned.
|
||||
The Argo CD UI should be updated to use the stats returned by the API server instead of calculating the stats on the client side.
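A minimal Go sketch of that idea, assuming the list handler still has the full filtered application list in memory before slicing out the requested page; the types are simplified stand-ins for the `ApplicationListStats` structure sketched above.

```go
package main

import "fmt"

// Minimal stand-ins for the types sketched above; the real structs live in the
// Argo CD API types package and carry more fields.
type app struct {
	Name         string
	SyncStatus   string
	HealthStatus string
}

type listStats struct {
	Total               int64
	TotalBySyncStatus   map[string]int64
	TotalByHealthStatus map[string]int64
}

// computeStats aggregates over the *full* application list before pagination
// is applied, so a single page response can still report totals for everything.
func computeStats(apps []app) listStats {
	s := listStats{
		TotalBySyncStatus:   map[string]int64{},
		TotalByHealthStatus: map[string]int64{},
	}
	for _, a := range apps {
		s.Total++
		s.TotalBySyncStatus[a.SyncStatus]++
		s.TotalByHealthStatus[a.HealthStatus]++
	}
	return s
}

func main() {
	apps := []app{
		{"guestbook", "Synced", "Healthy"},
		{"billing", "OutOfSync", "Degraded"},
		{"search", "Synced", "Healthy"},
	}
	fmt.Printf("%+v\n", computeStats(apps))
}
```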
|
||||
|
||||
**Argo CD CLI**
|
||||
|
||||
The Argo CD CLI should be updated to support server side pagination. The `argocd app list` command should be updated to support `--offset` and `--limit` flags.
|
||||
If the `--offset` and `--limit` flags are not specified the CLI should use pagination to load all applications in batches of 500 applications.
|
||||
|
||||
### Use cases
|
||||
|
||||
Add a list of detailed use cases this enhancement intends to take care of.
|
||||
|
||||
#### Use Server Side Pagination in the Argo CD User Interface to improve responsiveness
|
||||
As a user, I would like to be able to navigate through the list of applications using the pagination controls.
|
||||
|
||||
#### Use Server Side Pagination in the Argo CD CLI to reduce load on the API server
|
||||
As a user, I would like to use Argo CD CLI to list applications while leveraging the pagination without overloading the API server.
|
||||
|
||||
### Implementation Details/Notes/Constraints [optional]
|
||||
|
||||
**Application Stats**
|
||||
|
||||
**CLI Backward Compatibility**
|
||||
|
||||
Typically we bump the minimal supported API version when we introduce a new feature in the CLI. In this case I
suggest gracefully handling missing pagination support in the CLI. If the API server returns more applications than
specified in `limit`, the CLI should assume pagination is not supported and that the response contains the full list of applications.
This way the user can downgrade the API server without downgrading the CLI.
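A rough Go sketch of the suggested fallback, combined with the batched loading described for `argocd app list`; `fetchPage` is a hypothetical stand-in for a single Applications List request.

```go
package main

import "fmt"

// fetchPage is a hypothetical stand-in for one Applications List request with
// the proposed offset/limit fields; older API servers ignore them and return everything.
type fetchPage func(offset, limit int) []string

// listAll loads applications in batches, but falls back to treating the
// response as complete when the server ignores the limit (i.e. returns more
// than `limit` items), which is how a pagination-unaware server would behave.
func listAll(fetch fetchPage, limit int) []string {
	var all []string
	for offset := 0; ; offset += limit {
		batch := fetch(offset, limit)
		if len(batch) > limit {
			return batch // server ignored pagination: response is already complete
		}
		all = append(all, batch...)
		if len(batch) < limit {
			return all
		}
	}
}

func main() {
	apps := make([]string, 0, 7)
	for i := 0; i < 7; i++ {
		apps = append(apps, fmt.Sprintf("app-%d", i))
	}

	// New server honours offset/limit.
	paged := func(offset, limit int) []string {
		if offset >= len(apps) {
			return nil
		}
		end := offset + limit
		if end > len(apps) {
			end = len(apps)
		}
		return apps[offset:end]
	}
	// Old server returns the full list regardless of the request.
	legacy := func(offset, limit int) []string { return apps }

	fmt.Println(len(listAll(paged, 3)), len(listAll(legacy, 3))) // 7 7
}
```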
|
||||
|
||||
### Security Considerations
|
||||
|
||||
The proposal does not introduce any new security risks.
|
||||
|
||||
### Risks and Mitigations
|
||||
|
||||
We might need to bump the minimal supported API version in the CLI to support pagination. The `Implementation Details` section
contains a proposal to avoid doing that.
|
||||
|
||||
### Upgrade / Downgrade Strategy
|
||||
|
||||
The proposal does not introduce any breaking changes. The API server should gracefully handle requests without pagination fields.
|
||||
|
||||
## Drawbacks
|
||||
|
||||
None.
|
||||
|
||||
## Alternatives
|
||||
|
||||
****
|
||||