Compare commits

...

1 Commit

Author: austin5219
SHA1: ac7b13d947
Message: sibling of 092bb7328c
Date: 2024-11-04 18:44:23 +00:00
779 changed files with 9215 additions and 43256 deletions

View File

@@ -31,11 +31,6 @@ updates:
directory: "/"
schedule:
interval: "daily"
ignore:
# We use consistent go and node versions across a lot of different files, and updating via dependabot would cause
# drift among those files, instead we let renovate bot handle them.
- dependency-name: "library/golang"
- dependency-name: "library/node"
- package-ecosystem: "docker"
directory: "/test/container/"

View File

@@ -13,8 +13,7 @@ on:
env:
# Golang version to use across CI steps
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.23.2'
GOLANG_VERSION: '1.22'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -32,7 +31,7 @@ jobs:
docs: ${{ steps.filter.outputs.docs_any_changed }}
steps:
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- uses: tj-actions/changed-files@c3a1bb2c992d77180ae65be6ae6c166cf40f857c # v45.0.3
- uses: tj-actions/changed-files@e9772d140489982e0e3704fea5ee93d536f1e275 # v45.0.1
id: filter
with:
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
@@ -57,7 +56,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -78,11 +77,11 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -105,14 +104,13 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
with:
# renovate: datasource=go packageName=github.com/golangci/golangci-lint versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
version: v1.61.0
version: v1.58.2
args: --verbose
test-go:
@@ -133,7 +131,7 @@ jobs:
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -153,7 +151,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -174,7 +172,7 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate test results artifacts
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: test-results
path: test-results
@@ -197,7 +195,7 @@ jobs:
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -217,7 +215,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -238,7 +236,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: race-results
path: test-results/
@@ -253,7 +251,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
@@ -305,13 +303,12 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup NodeJS
uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
with:
# renovate: datasource=node-version packageName=node versioning=node
node-version: '22.9.0'
node-version: '21.6.1'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -351,7 +348,7 @@ jobs:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -376,7 +373,7 @@ jobs:
run: |
go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller -o test-results/full-coverage.out
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0
uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0
with:
file: test-results/full-coverage.out
fail_ci_if_error: true
@@ -384,7 +381,7 @@ jobs:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Upload test results to Codecov
if: github.ref == 'refs/heads/master' && github.event_name == 'push' && github.repository == 'argoproj/argo-cd'
uses: codecov/test-results-action@9739113ad922ea0a9abb4b2c0f8bf6a4aa8ef820 # v1.0.1
uses: codecov/test-results-action@1b5b448b98e58ba90d1a1a1d9fcb72ca2263be46 # v1.0.0
with:
file: test-results/junit.xml
fail_ci_if_error: true
@@ -393,7 +390,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
uses: SonarSource/sonarqube-scan-action@884b79409bbd464b2a59edc326a4b77dc56b2195 # v2.2
uses: SonarSource/sonarqube-scan-action@aecaf43ae57e412bd97d70ef9ce6076e672fe0a9 # v2.2
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests
@@ -403,14 +400,14 @@ jobs:
fail-fast: false
matrix:
k3s:
- version: v1.31.0
- version: v1.30.2
# We designate the latest version because we only collect code coverage for that version.
latest: true
- version: v1.30.4
- version: v1.29.6
latest: false
- version: v1.29.8
- version: v1.28.11
latest: false
- version: v1.28.13
- version: v1.27.15
latest: false
needs:
- build-go
@@ -432,7 +429,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
@@ -451,7 +448,7 @@ jobs:
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -509,13 +506,13 @@ jobs:
goreman run stop-all || echo "goreman trouble"
sleep 30
- name: Upload e2e coverage report
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: e2e-code-coverage
path: /tmp/coverage
if: ${{ matrix.k3s.latest }}
- name: Upload e2e-server logs
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: e2e-server-k8s${{ matrix.k3s.version }}.log
path: /tmp/e2e-server.log

View File

@@ -23,7 +23,7 @@ jobs:
actions: read # for github/codeql-action/init to get workflow details
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/autobuild to send a status report
if: github.repository == 'argoproj/argo-cd' || vars.enable_codeql
if: github.repository == 'argoproj/argo-cd'
# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-22.04
@@ -33,7 +33,7 @@ jobs:
# Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version-file: go.mod

View File

@@ -69,15 +69,15 @@ jobs:
if: ${{ github.ref_type != 'tag'}}
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ inputs.go-version }}
- name: Install cosign
uses: sigstore/cosign-installer@dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da # v3.7.0
uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0
- uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
- uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- name: Setup tags for container image as a CSV type
run: |
@@ -143,7 +143,7 @@ jobs:
- name: Build and push container image
id: image
uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 #v6.9.0
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 #v6.7.0
with:
context: .
platforms: ${{ inputs.platforms }}

View File

@@ -52,8 +52,7 @@ jobs:
uses: ./.github/workflows/image-reuse.yaml
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.23.2
go-version: 1.22
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false
@@ -69,8 +68,7 @@ jobs:
quay_image_name: quay.io/argoproj/argocd:latest
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.23.2
go-version: 1.22
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:

View File

@@ -64,7 +64,7 @@ jobs:
git stash pop
- name: Create pull request
uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"

View File

@@ -10,8 +10,7 @@ on:
permissions: {}
env:
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.23.2' # Note: go-version must also be set in job argocd-image.with.go-version
GOLANG_VERSION: '1.22' # Note: go-version must also be set in job argocd-image.with.go-version
jobs:
argocd-image:
@@ -24,8 +23,7 @@ jobs:
with:
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.23.2
go-version: 1.22
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
push: true
secrets:
@@ -69,15 +67,19 @@ jobs:
- name: Fetch all tags
run: git fetch --force --tags
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Set GORELEASER_PREVIOUS_TAG # Workaround, GoReleaser uses 'git-describe' to determine a previous tag. Our tags are created in release branches.
- name: Set GORELEASER_PREVIOUS_TAG # Workaround, GoReleaser uses 'git-describe' to determine a previous tag. Our tags are created in realease branches.
run: |
set -xue
echo "GORELEASER_PREVIOUS_TAG=$(go run hack/get-previous-release/get-previous-version-for-release-notes.go ${{ github.ref_name }})" >> $GITHUB_ENV
if echo ${{ github.ref_name }} | grep -E -- '-rc1+$';then
echo "GORELEASER_PREVIOUS_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n 2 | head -n 1)" >> $GITHUB_ENV
else
echo "This is not the first release on the branch, Using GoReleaser defaults"
fi
- name: Setup Golang
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Set environment variables for ldflags
id: set_ldflag
@@ -101,7 +103,7 @@ jobs:
args: release --clean --timeout 55m
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }}
KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }}
GIT_TREE_STATE: ${{ env.GIT_TREE_STATE }}
- name: Generate subject for provenance
@@ -151,7 +153,7 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup Golang
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version: ${{ env.GOLANG_VERSION }}
@@ -184,7 +186,7 @@ jobs:
fi
cd /tmp && tar -zcf sbom.tar.gz *.spdx
- name: Generate SBOM hash
shell: bash
id: sbom-hash
@@ -193,7 +195,7 @@ jobs:
# base64 -w0 encodes to base64 and outputs on a single line.
# sha256sum /tmp/sbom.tar.gz ... | base64 -w0
echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
- name: Upload SBOM
uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # v2.0.8
env:
@@ -201,7 +203,7 @@ jobs:
with:
files: |
/tmp/sbom.tar.gz
sbom-provenance:
needs: [generate-sbom]
permissions:
@@ -209,13 +211,13 @@ jobs:
id-token: write # Needed for provenance signing and ID
contents: write # Needed for release uploads
if: github.repository == 'argoproj/argo-cd'
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
with:
base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
provenance-name: "argocd-sbom.intoto.jsonl"
upload-assets: true
post-release:
needs:
- argocd-image
@@ -293,7 +295,7 @@ jobs:
if: ${{ env.UPDATE_VERSION == 'true' }}
- name: Create PR to update VERSION on master branch
uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v7.0.5
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
with:
commit-message: Bump version in master
title: "chore: Bump version in master"

View File

@@ -54,7 +54,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: SARIF file
path: results.sarif

View File

@@ -20,10 +20,8 @@ linters:
- misspell
- staticcheck
- testifylint
- thelper
- unparam
- unused
- usestdlibvars
- whitespace
linters-settings:
gocritic:

View File

@@ -8,4 +8,4 @@ python:
build:
os: "ubuntu-22.04"
tools:
python: "3.12"
python: "3.7"

View File

@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:3f85b7caad41a95462cf5b787d8
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
FROM docker.io/library/golang:1.23.2@sha256:ad5c126b5cf501a8caef751a243bb717ec204ab1aa56dc41dc11be089fafcb4f AS builder
FROM docker.io/library/golang:1.23.1@sha256:2fe82a3f3e006b4f2a316c6a21f62b66e1330ae211d039bb8d1128e12ed57bf1 AS builder
RUN echo 'deb http://archive.debian.org/debian buster-backports main' >> /etc/apt/sources.list
@@ -83,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.0.0@sha256:e643c0b70dca9704dff42e12b17f5b719dbe4f95e6392fc2dfa0c5f02ea8044d AS argocd-ui
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.8.0@sha256:bd00c03095f7586432805dbf7989be10361d27987f93de904b1fc003949a4794 AS argocd-ui
WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.23.2@sha256:ad5c126b5cf501a8caef751a243bb717ec204ab1aa56dc41dc11be089fafcb4f AS argocd-build
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.23.1@sha256:2fe82a3f3e006b4f2a316c6a21f62b66e1330ae211d039bb8d1128e12ed57bf1 AS argocd-build
WORKDIR /go/src/github.com/argoproj/argo-cd

View File

@@ -486,7 +486,6 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
BIN_MODE=$(ARGOCD_BIN_MODE) \
ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external,argocd-e2e-external-2 \
ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external,argocd-e2e-external-2 \
ARGOCD_APPLICATIONSET_CONTROLLER_TOKENREF_STRICT_MODE=true \
ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS=http://127.0.0.1:8341,http://127.0.0.1:8342,http://127.0.0.1:8343,http://127.0.0.1:8344 \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}

View File

@@ -1,7 +1,7 @@
controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/app-controller} HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
redis: hack/start-redis-with-password.sh
redis: bash -c "if [ \"$ARGOCD_REDIS_LOCAL\" = 'true' ]; then redis-server --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; else docker run --rm --name argocd-redis -i -p ${ARGOCD_E2E_REDIS_PORT:-6379}:${ARGOCD_E2E_REDIS_PORT:-6379} docker.io/library/redis:$(grep "image: redis" manifests/base/redis/argocd-redis-deployment.yaml | cut -d':' -f3) --save '' --appendonly no --port ${ARGOCD_E2E_REDIS_PORT:-6379}; fi"
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'

View File

@@ -8,6 +8,7 @@
[![codecov](https://codecov.io/gh/argoproj/argo-cd/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-cd)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4486/badge)](https://bestpractices.coreinfrastructure.org/projects/4486)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-cd/badge)](https://scorecard.dev/viewer/?uri=github.com/argoproj/argo-cd)
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fargoproj%2Fargo-cd.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-cd?ref=badge_shield)
**Social:**
[![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj)
@@ -56,7 +57,7 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
### Blogs and Presentations
1. [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://akuity.io/blog/secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argocd/)
1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://akuity.io/blog/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argocd-kubecon-china-2021/)
1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU)
1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM)
1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ)

View File

@@ -3,9 +3,9 @@ header:
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
last-updated: '2023-10-27'
last-reviewed: '2023-10-27'
commit-hash: 74a367d10e7110209610ba3ec225539ebe5f7522
commit-hash: fe606708859574b9b6102a505e260fac5d3fb14e
project-url: https://github.com/argoproj/argo-cd
project-release: v2.14.0
project-release: v2.13.0
changelog: https://github.com/argoproj/argo-cd/releases
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
project-lifecycle:

View File

@@ -35,7 +35,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Axians ACSP](https://www.axians.fr)
1. [Axual B.V.](https://axual.com)
1. [Back Market](https://www.backmarket.com)
1. [Bajaj Finserv Health Ltd.](https://www.bajajfinservhealth.in)
1. [Baloise](https://www.baloise.com)
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
1. [Beat](https://thebeat.co/en/)
@@ -77,7 +76,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Codility](https://www.codility.com/)
1. [Cognizant](https://www.cognizant.com/)
1. [Commonbond](https://commonbond.co/)
1. [Compatio.AI](https://compatio.ai/)
1. [Contlo](https://contlo.com/)
1. [Coralogix](https://coralogix.com/)
1. [Crédit Agricole CIB](https://www.ca-cib.com)
@@ -87,7 +85,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [D2iQ](https://www.d2iq.com)
1. [DaoCloud](https://daocloud.io/)
1. [Datarisk](https://www.datarisk.io/)
1. [Daydream](https://daydream.ing)
1. [Deloitte](https://www.deloitte.com/)
1. [Deutsche Telekom AG](https://telekom.com)
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
@@ -246,7 +243,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Optoro](https://www.optoro.com/)
1. [Orbital Insight](https://orbitalinsight.com/)
1. [Oscar Health Insurance](https://hioscar.com/)
1. [Outpost24](https://outpost24.com/)
1. [p3r](https://www.p3r.one/)
1. [Packlink](https://www.packlink.com/)
1. [PagerDuty](https://www.pagerduty.com/)
@@ -275,7 +271,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [PT Boer Technology (Btech)](https://btech.id/)
1. [PUBG](https://www.pubg.com)
1. [Puzzle ITC](https://www.puzzle.ch/)
1. [Pvotal Technologies](https://pvotal.tech/)
1. [Qonto](https://qonto.com)
1. [QuintoAndar](https://quintoandar.com.br)
1. [Quipper](https://www.quipper.com/)
@@ -312,7 +307,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Skyscanner](https://www.skyscanner.net/)
1. [Smart Pension](https://www.smartpension.co.uk/)
1. [Smilee.io](https://smilee.io)
1. [Smilegate Stove](https://www.onstove.com/)
1. [Smood.ch](https://www.smood.ch/)
1. [Snapp](https://snapp.ir/)
1. [Snyk](https://snyk.io/)
@@ -336,7 +330,6 @@ Currently, the following organizations are **officially** using Argo CD:
1. [TableCheck](https://tablecheck.com/)
1. [Tailor Brands](https://www.tailorbrands.com)
1. [Tamkeen Technologies](https://tamkeentech.sa/)
1. [TBC Bank](https://tbcbank.ge/)
1. [Techcombank](https://www.techcombank.com.vn/trang-chu)
1. [Technacy](https://www.technacy.it/)
1. [Telavita](https://www.telavita.com.br/)

View File

@@ -1 +1 @@
2.14.0
2.13.0

View File

@@ -52,6 +52,7 @@ import (
"github.com/argoproj/argo-cd/v2/util/db"
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
argoutil "github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
@@ -78,6 +79,7 @@ type ApplicationSetReconciler struct {
Recorder record.EventRecorder
Generators map[string]generators.Generator
ArgoDB db.ArgoDB
ArgoAppClientset appclientset.Interface
KubeClientset kubernetes.Interface
Policy argov1alpha1.ApplicationsSyncPolicy
EnablePolicyOverride bool
@@ -95,7 +97,6 @@ type ApplicationSetReconciler struct {
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets/status,verbs=get;update;patch
func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
startReconcile := time.Now()
logCtx := log.WithField("applicationset", req.NamespacedName)
var applicationSetInfo argov1alpha1.ApplicationSet
@@ -333,7 +334,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
requeueAfter = ReconcileRequeueOnValidationError
}
logCtx.WithField("requeueAfter", requeueAfter).Info("end reconcile in ", time.Since(startReconcile))
logCtx.WithField("requeueAfter", requeueAfter).Info("end reconcile")
return ctrl.Result{
RequeueAfter: requeueAfter,
@@ -471,9 +472,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
continue
}
appProject := &argov1alpha1.AppProject{}
err := r.Client.Get(ctx, types.NamespacedName{Name: app.Spec.Project, Namespace: r.ArgoCDNamespace}, appProject)
_, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
if err != nil {
if apierr.IsNotFound(err) {
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
@@ -1491,7 +1490,7 @@ func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
return false
}
requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs)
logCtx.WithField("requeue", requeue).Debugf("requeue: %t caused by application %s", requeue, appNew.Name)
logCtx.WithField("requeue", requeue).Debugf("requeue: %t caused by application %s\n", requeue, appNew.Name)
return requeue
},
GenericFunc: func(e event.GenericEvent) bool {

View File

@@ -36,8 +36,8 @@ import (
"github.com/argoproj/argo-cd/v2/applicationset/utils"
appsetmetrics "github.com/argoproj/argo-cd/v2/applicationset/metrics"
argocommon "github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
@@ -48,6 +48,9 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
for _, c := range []struct {
// name is human-readable test name
name string
@@ -1088,6 +1091,9 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
for _, c := range []struct {
// name is human-readable test name
name string
@@ -1151,7 +1157,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
Name: "my-secret",
Namespace: "namespace",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
Data: map[string][]byte{
@@ -1208,6 +1214,9 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
for _, c := range []struct {
// name is human-readable test name
name string
@@ -1307,7 +1316,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
Name: "my-secret",
Namespace: "namespace",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
Data: map[string][]byte{
@@ -1362,6 +1371,9 @@ func TestRemoveOwnerReferencesOnDeleteAppSet(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
for _, c := range []struct {
// name is human-readable test name
name string
@@ -1435,6 +1447,9 @@ func TestCreateApplications(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
testCases := []struct {
name string
appSet v1alpha1.ApplicationSet
@@ -1638,6 +1653,8 @@ func TestDeleteInCluster(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
for _, c := range []struct {
// appSet is the application set on which the delete function is called
@@ -1792,6 +1809,8 @@ func TestGetMinRequeueAfter(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
client := fake.NewClientBuilder().WithScheme(scheme).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
@@ -1894,6 +1913,12 @@ func TestValidateGeneratedApplications(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
client := fake.NewClientBuilder().WithScheme(scheme).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
// Valid cluster
myCluster := v1alpha1.Cluster{
Server: "https://kubernetes.default.svc",
@@ -1920,9 +1945,6 @@ func TestValidateGeneratedApplications(t *testing.T) {
},
}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(myProject).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
// Test a subset of the validations that 'validateGeneratedApplications' performs
for _, cc := range []struct {
name string
@@ -2053,7 +2075,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
Name: "my-secret",
Namespace: "namespace",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
Data: map[string][]byte{
@@ -2072,15 +2094,21 @@ func TestValidateGeneratedApplications(t *testing.T) {
myCluster,
}}, nil)
argoObjs := []runtime.Object{myProject}
for _, app := range cc.apps {
argoObjs = append(argoObjs, &app)
}
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "namespace",
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "namespace",
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
appSetInfo := v1alpha1.ApplicationSet{}
@@ -2122,6 +2150,8 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
project := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"},
@@ -2160,8 +2190,9 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset()
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&project}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &project).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}
@@ -2179,11 +2210,12 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Policy: v1alpha1.ApplicationsSyncPolicySync,
ArgoCDNamespace: "argocd",
Metrics: metrics,
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Policy: v1alpha1.ApplicationsSyncPolicySync,
ArgoCDNamespace: "argocd",
Metrics: metrics,
}
req := ctrl.Request{
@@ -2214,6 +2246,8 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
testCases := []struct {
appset v1alpha1.ApplicationSet
@@ -2246,7 +2280,6 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
},
},
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
t.Helper()
assert.Len(t, appset.Status.Conditions, 3)
},
},
@@ -2282,7 +2315,6 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
},
},
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
t.Helper()
assert.Len(t, appset.Status.Conditions, 3)
isProgressingCondition := false
@@ -2345,7 +2377,6 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
},
},
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
t.Helper()
assert.Len(t, appset.Status.Conditions, 4)
isProgressingCondition := false
@@ -2364,6 +2395,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
for _, testCase := range testCases {
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&testCase.appset).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).WithStatusSubresource(&testCase.appset).Build()
@@ -2377,9 +2409,10 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
for _, condition := range testCase.conditions {
@@ -2392,10 +2425,11 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
}
func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alpha1.ApplicationsSyncPolicy, recordBuffer int, allowPolicyOverride bool) v1alpha1.Application {
t.Helper()
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
defaultProject := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
@@ -2435,8 +2469,9 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
kubeclientset := kubefake.NewSimpleClientset()
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
@@ -2454,6 +2489,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "argocd",
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Policy: v1alpha1.ApplicationsSyncPolicySync,
EnablePolicyOverride: allowPolicyOverride,
@@ -2554,10 +2590,11 @@ func TestUpdatePerformedWithSyncPolicyCreateOnlyAndAllowPolicyOverrideFalse(t *t
}
func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alpha1.ApplicationsSyncPolicy, recordBuffer int, allowPolicyOverride bool) v1alpha1.ApplicationList {
t.Helper()
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
defaultProject := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
@@ -2597,8 +2634,9 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
kubeclientset := kubefake.NewSimpleClientset()
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
@@ -2616,6 +2654,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "argocd",
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Policy: v1alpha1.ApplicationsSyncPolicySync,
EnablePolicyOverride: allowPolicyOverride,
@@ -2713,6 +2752,9 @@ func TestPolicies(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
defaultProject := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
Spec: v1alpha1.AppProjectSpec{SourceRepos: []string{"*"}, Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "https://kubernetes.default.svc"}}},
@@ -2725,6 +2767,7 @@ func TestPolicies(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset()
argoDBMock := dbmocks.ArgoDB{}
argoDBMock.On("GetCluster", mock.Anything, "https://kubernetes.default.svc").Return(&myCluster, nil)
argoObjs := []runtime.Object{&defaultProject}
for _, c := range []struct {
name string
@@ -2796,7 +2839,7 @@ func TestPolicies(t *testing.T) {
},
}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
r := ApplicationSetReconciler{
@@ -2807,11 +2850,12 @@ func TestPolicies(t *testing.T) {
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: &argoDBMock,
ArgoCDNamespace: "argocd",
KubeClientset: kubeclientset,
Policy: policy,
Metrics: metrics,
ArgoDB: &argoDBMock,
ArgoCDNamespace: "argocd",
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Policy: policy,
Metrics: metrics,
}
req := ctrl.Request{
@@ -2879,9 +2923,12 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
scheme := runtime.NewScheme()
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
for _, cc := range []struct {
name string
@@ -2965,9 +3012,10 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
Generators: map[string]generators.Generator{
"List": generators.NewListGenerator(),
},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
err = r.setAppSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appStatuses)
@@ -2983,6 +3031,9 @@ func TestBuildAppDependencyList(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
client := fake.NewClientBuilder().WithScheme(scheme).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
@@ -3715,15 +3766,17 @@ func TestBuildAppDependencyList(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
appDependencyList, appStepMap := r.buildAppDependencyList(log.NewEntry(log.StandardLogger()), cc.appSet, cc.apps)
@@ -3738,6 +3791,9 @@ func TestBuildAppSyncMap(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
client := fake.NewClientBuilder().WithScheme(scheme).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
@@ -4382,15 +4438,17 @@ func TestBuildAppSyncMap(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
appSyncMap := r.buildAppSyncMap(cc.appSet, cc.appDependencyList, cc.appMap)
@@ -4404,6 +4462,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet
@@ -5327,18 +5388,20 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
appStatuses, err := r.updateApplicationSetApplicationStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps, cc.appStepMap)
@@ -5359,6 +5422,9 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet
@@ -6076,18 +6142,20 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
appStatuses, err := r.updateApplicationSetApplicationStatusProgress(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.appSyncMap, cc.appStepMap)
@@ -6108,6 +6176,9 @@ func TestUpdateResourceStatus(t *testing.T) {
err := v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
err = v1alpha1.AddToScheme(scheme)
require.NoError(t, err)
for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet
@@ -6287,18 +6358,20 @@ func TestUpdateResourceStatus(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
client := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&cc.appSet).WithObjects(&cc.appSet).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
err := r.updateResourcesStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps)
@@ -6377,18 +6450,20 @@ func TestResourceStatusAreOrdered(t *testing.T) {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}
client := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&cc.appSet).WithObjects(&cc.appSet).Build()
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
KubeClientset: kubeclientset,
Metrics: metrics,
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &argoDBMock,
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
KubeClientset: kubeclientset,
Metrics: metrics,
}
err := r.updateResourcesStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps)

View File

@@ -14,7 +14,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
@@ -50,7 +50,7 @@ type addRateLimitingInterface[T comparable] interface {
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Context, q addRateLimitingInterface[reconcile.Request], object client.Object) {
// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
if object.GetLabels()[common.LabelKeySecretType] != common.LabelValueSecretTypeCluster {
if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
return
}

View File

@@ -4,8 +4,6 @@ import (
"context"
"testing"
argocommon "github.com/argoproj/argo-cd/v2/common"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -18,6 +16,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/argoproj/argo-cd/v2/applicationset/generators"
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
@@ -43,7 +42,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -71,7 +70,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -114,7 +113,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -158,7 +157,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -219,7 +218,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -255,7 +254,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -305,7 +304,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -356,7 +355,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -390,7 +389,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -426,7 +425,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -476,7 +475,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},
@@ -527,7 +526,7 @@ func TestClusterEventHandler(t *testing.T) {
Namespace: "argocd",
Name: "my-secret",
Labels: map[string]string{
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
generators.ArgoCDSecretTypeLabel: generators.ArgoCDSecretTypeCluster,
},
},
},

View File

@@ -57,7 +57,7 @@ func TestRequeueAfter(t *testing.T) {
},
}
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
scmConfig := generators.NewSCMConfig("", []string{""}, true, nil, true)
scmConfig := generators.NewSCMConfig("", []string{""}, true, nil)
terminalGenerators := map[string]generators.Generator{
"List": generators.NewListGenerator(),
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
@@ -100,8 +100,7 @@ func TestRequeueAfter(t *testing.T) {
}
type args struct {
appset *argov1alpha1.ApplicationSet
requeueAfterOverride string
appset *argov1alpha1.ApplicationSet
}
tests := []struct {
name string
@@ -109,13 +108,11 @@ func TestRequeueAfter(t *testing.T) {
want time.Duration
wantErr assert.ErrorAssertionFunc
}{
{name: "Cluster", args: args{
appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{Clusters: &argov1alpha1.ClusterGenerator{}}},
},
}, requeueAfterOverride: "",
}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
{name: "Cluster", args: args{appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{Clusters: &argov1alpha1.ClusterGenerator{}}},
},
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
{name: "ClusterMergeNested", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
@@ -130,7 +127,7 @@ func TestRequeueAfter(t *testing.T) {
}},
},
},
}, ""}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "ClusterMatrixNested", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
@@ -145,65 +142,15 @@ func TestRequeueAfter(t *testing.T) {
}},
},
},
}, ""}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "ListGenerator", args: args{appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{List: &argov1alpha1.ListGenerator{}}},
},
}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
{name: "DuckGenerator", args: args{appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{ClusterDecisionResource: &argov1alpha1.DuckTypeGenerator{}}},
},
}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
{name: "OverrideRequeueDuck", args: args{
appset: &argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{{ClusterDecisionResource: &argov1alpha1.DuckTypeGenerator{}}},
},
}, requeueAfterOverride: "1h",
}, want: 1 * time.Hour, wantErr: assert.NoError},
{name: "OverrideRequeueGit", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
{Git: &argov1alpha1.GitGenerator{}},
},
},
}, "1h"}, want: 1 * time.Hour, wantErr: assert.NoError},
{name: "OverrideRequeueMatrix", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
{Clusters: &argov1alpha1.ClusterGenerator{}},
{Merge: &argov1alpha1.MergeGenerator{
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
{
Clusters: &argov1alpha1.ClusterGenerator{},
Git: &argov1alpha1.GitGenerator{},
},
},
}},
},
},
}, "5m"}, want: 5 * time.Minute, wantErr: assert.NoError},
{name: "OverrideRequeueMerge", args: args{&argov1alpha1.ApplicationSet{
Spec: argov1alpha1.ApplicationSetSpec{
Generators: []argov1alpha1.ApplicationSetGenerator{
{Clusters: &argov1alpha1.ClusterGenerator{}},
{Merge: &argov1alpha1.MergeGenerator{
Generators: []argov1alpha1.ApplicationSetNestedGenerator{
{
Clusters: &argov1alpha1.ClusterGenerator{},
Git: &argov1alpha1.GitGenerator{},
},
},
}},
},
},
}, "12s"}, want: 12 * time.Second, wantErr: assert.NoError},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Setenv("ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER", tt.args.requeueAfterOverride)
assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset)
})
}

View File

@@ -2,7 +2,6 @@ package template
import (
"fmt"
"maps"
"testing"
"github.com/stretchr/testify/mock"
@@ -19,6 +18,7 @@ import (
rendmock "github.com/argoproj/argo-cd/v2/applicationset/utils/mocks"
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/collections"
)
func TestGenerateApplications(t *testing.T) {
@@ -344,7 +344,7 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
assert.EqualValues(t, cases.expectedApp[0].ObjectMeta.Name, gotApp[0].ObjectMeta.Name)
assert.EqualValues(t, cases.expectedApp[0].Spec.Source.TargetRevision, gotApp[0].Spec.Source.TargetRevision)
assert.EqualValues(t, cases.expectedApp[0].Spec.Destination.Namespace, gotApp[0].Spec.Destination.Namespace)
assert.True(t, maps.Equal(cases.expectedApp[0].ObjectMeta.Labels, gotApp[0].ObjectMeta.Labels))
assert.True(t, collections.StringMapsEqual(cases.expectedApp[0].ObjectMeta.Labels, gotApp[0].ObjectMeta.Labels))
})
}
}
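
Editor's note: the hunk above reverts a label comparison from the standard library's `maps.Equal` back to the project's `collections.StringMapsEqual` helper. As a point of reference, a minimal sketch of the stdlib form (Go 1.21+), detached from the Argo CD test harness:

```go
package main

import (
	"fmt"
	"maps" // standard library as of Go 1.21
)

func main() {
	want := map[string]string{"app": "demo", "env": "prod"}
	got := map[string]string{"env": "prod", "app": "demo"}

	// maps.Equal compares keys and values; iteration order is irrelevant.
	fmt.Println(maps.Equal(want, got)) // true
}
```

Both forms report equality of keys and values regardless of map ordering.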

View File

@@ -15,10 +15,14 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/argoproj/argo-cd/v2/common"
argoappsetv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
const (
ArgoCDSecretTypeLabel = "argocd.argoproj.io/secret-type"
ArgoCDSecretTypeCluster = "cluster"
)
var _ Generator = (*ClusterGenerator)(nil)
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
@@ -88,10 +92,6 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
secretsFound := []corev1.Secret{}
isFlatMode := appSetGenerator.Clusters.FlatList
log.Debug("Using flat mode = ", isFlatMode, " for cluster generator")
clustersParams := make([]map[string]interface{}, 0)
for _, cluster := range clustersFromArgoCD.Items {
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
// handled by the next step.
@@ -103,18 +103,13 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
params["name"] = cluster.Name
params["nameNormalized"] = cluster.Name
params["server"] = cluster.Server
params["project"] = ""
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
if err != nil {
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
}
if isFlatMode {
clustersParams = append(clustersParams, params)
} else {
res = append(res, params)
}
res = append(res, params)
log.WithField("cluster", "local cluster").Info("matched local cluster")
}
@@ -128,13 +123,6 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"]))
params["server"] = string(cluster.Data["server"])
project, ok := cluster.Data["project"]
if ok {
params["project"] = string(project)
} else {
params["project"] = ""
}
if appSet.Spec.GoTemplate {
meta := map[string]interface{}{}
@@ -161,20 +149,11 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
return nil, fmt.Errorf("error appending templated values for cluster: %w", err)
}
if isFlatMode {
clustersParams = append(clustersParams, params)
} else {
res = append(res, params)
}
res = append(res, params)
log.WithField("cluster", cluster.Name).Info("matched cluster secret")
}
if isFlatMode {
res = append(res, map[string]interface{}{
"clusters": clustersParams,
})
}
return res, nil
}
@@ -182,7 +161,7 @@ func (g *ClusterGenerator) getSecretsByClusterName(appSetGenerator *argoappsetv1
// List all Clusters:
clusterSecretList := &corev1.SecretList{}
selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, common.LabelKeySecretType, common.LabelValueSecretTypeCluster)
selector := metav1.AddLabelToSelector(&appSetGenerator.Clusters.Selector, ArgoCDSecretTypeLabel, ArgoCDSecretTypeCluster)
secretSelector, err := metav1.LabelSelectorAsSelector(selector)
if err != nil {
return nil, fmt.Errorf("error converting label selector: %w", err)

View File

@@ -76,20 +76,18 @@ func TestGenerateParams(t *testing.T) {
},
},
Data: map[string][]byte{
"config": []byte("{}"),
"name": []byte("production_01/west"),
"server": []byte("https://production-01.example.com"),
"project": []byte("prod-project"),
"config": []byte("{}"),
"name": []byte("production_01/west"),
"server": []byte("https://production-01.example.com"),
},
Type: corev1.SecretType("Opaque"),
},
}
testCases := []struct {
name string
selector metav1.LabelSelector
isFlatMode bool
values map[string]string
expected []map[string]interface{}
name string
selector metav1.LabelSelector
values map[string]string
expected []map[string]interface{}
// clientError is true if a k8s client error should be simulated
clientError bool
expectedError error
@@ -107,16 +105,17 @@ func TestGenerateParams(t *testing.T) {
"aaa": "{{ server }}",
"no-op": "{{ this-does-not-exist }}",
}, expected: []map[string]interface{}{
{"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "{{ metadata.annotations.foo.argoproj.io }}", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "{{ metadata.labels.environment }}", "values.aaa": "https://kubernetes.default.svc", "nameNormalized": "in-cluster", "name": "in-cluster", "server": "https://kubernetes.default.svc", "project": ""},
{
"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "production", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "production", "values.aaa": "https://production-01.example.com", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production",
},
{
"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "staging", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "staging", "values.aaa": "https://staging-01.example.com", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging",
},
{"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "{{ metadata.annotations.foo.argoproj.io }}", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "{{ metadata.labels.environment }}", "values.aaa": "https://kubernetes.default.svc", "nameNormalized": "in-cluster", "name": "in-cluster", "server": "https://kubernetes.default.svc"},
},
clientError: false,
expectedError: nil,
@@ -132,12 +131,12 @@ func TestGenerateParams(t *testing.T) {
expected: []map[string]interface{}{
{
"name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production",
},
{
"name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging",
},
},
clientError: false,
@@ -156,7 +155,7 @@ func TestGenerateParams(t *testing.T) {
expected: []map[string]interface{}{
{
"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production",
},
},
clientError: false,
@@ -182,11 +181,11 @@ func TestGenerateParams(t *testing.T) {
expected: []map[string]interface{}{
{
"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging",
},
{
"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production",
},
},
clientError: false,
@@ -215,7 +214,7 @@ func TestGenerateParams(t *testing.T) {
expected: []map[string]interface{}{
{
"values.name": "baz", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging",
},
},
clientError: false,
@@ -229,74 +228,6 @@ func TestGenerateParams(t *testing.T) {
clientError: true,
expectedError: fmt.Errorf("error getting cluster secrets: could not list Secrets"),
},
{
name: "flat mode without selectors",
selector: metav1.LabelSelector{},
values: map[string]string{
"lol1": "lol",
"lol2": "{{values.lol1}}{{values.lol1}}",
"lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}",
"foo": "bar",
"bar": "{{ metadata.annotations.foo.argoproj.io }}",
"bat": "{{ metadata.labels.environment }}",
"aaa": "{{ server }}",
"no-op": "{{ this-does-not-exist }}",
},
expected: []map[string]interface{}{
{
"clusters": []map[string]interface{}{
{"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "{{ metadata.annotations.foo.argoproj.io }}", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "{{ metadata.labels.environment }}", "values.aaa": "https://kubernetes.default.svc", "nameNormalized": "in-cluster", "name": "in-cluster", "server": "https://kubernetes.default.svc", "project": ""},
{
"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "production", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "production", "values.aaa": "https://production-01.example.com", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
},
{
"values.lol1": "lol", "values.lol2": "{{values.lol1}}{{values.lol1}}", "values.lol3": "{{values.lol2}}{{values.lol2}}{{values.lol2}}", "values.foo": "bar", "values.bar": "staging", "values.no-op": "{{ this-does-not-exist }}", "values.bat": "staging", "values.aaa": "https://staging-01.example.com", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
},
},
},
},
isFlatMode: true,
clientError: false,
expectedError: nil,
},
{
name: "production or staging with flat mode",
selector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "environment",
Operator: "In",
Values: []string{
"production",
"staging",
},
},
},
},
isFlatMode: true,
values: map[string]string{
"foo": "bar",
},
expected: []map[string]interface{}{
{
"clusters": []map[string]interface{}{
{
"values.foo": "bar", "name": "production_01/west", "nameNormalized": "production-01-west", "server": "https://production-01.example.com", "metadata.labels.environment": "production", "metadata.labels.org": "bar",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "production", "project": "prod-project",
},
{
"values.foo": "bar", "name": "staging-01", "nameNormalized": "staging-01", "server": "https://staging-01.example.com", "metadata.labels.environment": "staging", "metadata.labels.org": "foo",
"metadata.labels.argocd.argoproj.io/secret-type": "cluster", "metadata.annotations.foo.argoproj.io": "staging", "project": "",
},
},
},
},
clientError: false,
expectedError: nil,
},
}
// convert []client.Object to []runtime.Object, for use by kubefake package
@@ -328,7 +259,6 @@ func TestGenerateParams(t *testing.T) {
Clusters: &argoprojiov1alpha1.ClusterGenerator{
Selector: testCase.selector,
Values: testCase.values,
FlatList: testCase.isFlatMode,
},
}, &applicationSetInfo, nil)
@@ -394,11 +324,10 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
},
}
testCases := []struct {
name string
selector metav1.LabelSelector
values map[string]string
isFlatMode bool
expected []map[string]interface{}
name string
selector metav1.LabelSelector
values map[string]string
expected []map[string]interface{}
// clientError is true if a k8s client error should be simulated
clientError bool
expectedError error
@@ -420,7 +349,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -446,7 +374,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -472,7 +399,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"nameNormalized": "in-cluster",
"name": "in-cluster",
"server": "https://kubernetes.default.svc",
"project": "",
"values": map[string]string{
"lol1": "lol",
"lol2": "<no value><no value>",
@@ -501,7 +427,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -517,7 +442,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -548,7 +472,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -589,7 +512,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -608,7 +530,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -652,7 +573,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
@@ -679,162 +599,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
clientError: true,
expectedError: fmt.Errorf("error getting cluster secrets: could not list Secrets"),
},
{
name: "Clusters with flat list mode and no selector",
selector: metav1.LabelSelector{},
isFlatMode: true,
values: map[string]string{
"lol1": "lol",
"lol2": "{{ .values.lol1 }}{{ .values.lol1 }}",
"lol3": "{{ .values.lol2 }}{{ .values.lol2 }}{{ .values.lol2 }}",
"foo": "bar",
"bar": "{{ if not (empty .metadata) }}{{index .metadata.annotations \"foo.argoproj.io\" }}{{ end }}",
"bat": "{{ if not (empty .metadata) }}{{.metadata.labels.environment}}{{ end }}",
"aaa": "{{ .server }}",
"no-op": "{{ .thisDoesNotExist }}",
},
expected: []map[string]interface{}{
{
"clusters": []map[string]interface{}{
{
"nameNormalized": "in-cluster",
"name": "in-cluster",
"server": "https://kubernetes.default.svc",
"project": "",
"values": map[string]string{
"lol1": "lol",
"lol2": "<no value><no value>",
"lol3": "<no value><no value><no value>",
"foo": "bar",
"bar": "",
"bat": "",
"aaa": "https://kubernetes.default.svc",
"no-op": "<no value>",
},
},
{
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "production",
"org": "bar",
},
"annotations": map[string]string{
"foo.argoproj.io": "production",
},
},
"values": map[string]string{
"lol1": "lol",
"lol2": "<no value><no value>",
"lol3": "<no value><no value><no value>",
"foo": "bar",
"bar": "production",
"bat": "production",
"aaa": "https://production-01.example.com",
"no-op": "<no value>",
},
},
{
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "staging",
"org": "foo",
},
"annotations": map[string]string{
"foo.argoproj.io": "staging",
},
},
"values": map[string]string{
"lol1": "lol",
"lol2": "<no value><no value>",
"lol3": "<no value><no value><no value>",
"foo": "bar",
"bar": "staging",
"bat": "staging",
"aaa": "https://staging-01.example.com",
"no-op": "<no value>",
},
},
},
},
},
clientError: false,
expectedError: nil,
},
{
name: "production or staging with flat mode",
selector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "environment",
Operator: "In",
Values: []string{
"production",
"staging",
},
},
},
},
isFlatMode: true,
values: map[string]string{
"foo": "bar",
},
expected: []map[string]interface{}{
{
"clusters": []map[string]interface{}{
{
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "production",
"org": "bar",
},
"annotations": map[string]string{
"foo.argoproj.io": "production",
},
},
"values": map[string]string{
"foo": "bar",
},
},
{
"name": "staging-01",
"nameNormalized": "staging-01",
"server": "https://staging-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"argocd.argoproj.io/secret-type": "cluster",
"environment": "staging",
"org": "foo",
},
"annotations": map[string]string{
"foo.argoproj.io": "staging",
},
},
"values": map[string]string{
"foo": "bar",
},
},
},
},
},
clientError: false,
expectedError: nil,
},
}
// convert []client.Object to []runtime.Object, for use by kubefake package
@@ -868,7 +632,6 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
Clusters: &argoprojiov1alpha1.ClusterGenerator{
Selector: testCase.selector,
Values: testCase.values,
FlatList: testCase.isFlatMode,
},
}, &applicationSetInfo, nil)

View File

@@ -52,7 +52,7 @@ func (g *DuckTypeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.
return time.Duration(*appSetGenerator.ClusterDecisionResource.RequeueAfterSeconds) * time.Second
}
return getDefaultRequeueAfter()
return DefaultRequeueAfterSeconds
}
func (g *DuckTypeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {

View File

@@ -199,7 +199,6 @@ func TestTransForm(t *testing.T) {
"name": "production_01/west",
"nameNormalized": "production-01-west",
"server": "https://production-01.example.com",
"project": "",
}},
},
{
@@ -215,7 +214,6 @@ func TestTransForm(t *testing.T) {
"name": "some-really-long-server-url",
"nameNormalized": "some-really-long-server-url",
"server": "https://some-really-long-url-that-will-exceed-63-characters.com",
"project": "",
}},
},
}

View File

@@ -48,7 +48,7 @@ func (g *GitGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Appli
return time.Duration(*appSetGenerator.Git.RequeueAfterSeconds) * time.Second
}
return getDefaultRequeueAfter()
return DefaultRequeueAfterSeconds
}
func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, appSet *argoprojiov1alpha1.ApplicationSet, client client.Client) ([]map[string]interface{}, error) {

View File

@@ -7,7 +7,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/env"
)
// Generator defines the interface implemented by all ApplicationSet generators.
@@ -31,11 +30,7 @@ var (
NoRequeueAfter time.Duration
)
// DefaultRequeueAfterSeconds is used when GetRequeueAfter is not specified, it is the default time to wait before the next reconcile loop
const (
DefaultRequeueAfterSeconds = 3 * time.Minute
)
func getDefaultRequeueAfter() time.Duration {
// Default is 3 minutes, min is 1 second, max is 1 year
return env.ParseDurationFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER", DefaultRequeueAfterSeconds, 1*time.Second, 8760*time.Hour)
}
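
Editor's note: the removed `getDefaultRequeueAfter` helper resolved the default requeue interval from `ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER`, clamped between one second and one year. A standalone sketch of the same idea using only the standard library (the real helper goes through the project's `env.ParseDurationFromEnv`):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

const defaultRequeueAfter = 3 * time.Minute

// requeueAfterFromEnv parses an optional duration override from the
// environment and falls back to the default when the variable is unset,
// invalid, or outside the allowed [min, max] range.
func requeueAfterFromEnv(key string, min, max time.Duration) time.Duration {
	raw := os.Getenv(key)
	if raw == "" {
		return defaultRequeueAfter
	}
	d, err := time.ParseDuration(raw)
	if err != nil || d < min || d > max {
		return defaultRequeueAfter
	}
	return d
}

func main() {
	os.Setenv("ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER", "10m")
	fmt.Println(requeueAfterFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER", time.Second, 8760*time.Hour))
}
```

This matches the behavior exercised by the deleted test table: values below the minimum or above the maximum fall back to the default.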

View File

@@ -1,29 +0,0 @@
package generators
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func Test_getDefaultRequeueAfter(t *testing.T) {
tests := []struct {
name string
requeueAfterEnv string
want time.Duration
}{
{name: "Default", requeueAfterEnv: "", want: DefaultRequeueAfterSeconds},
{name: "Min", requeueAfterEnv: "1s", want: 1 * time.Second},
{name: "Max", requeueAfterEnv: "8760h", want: 8760 * time.Hour},
{name: "Override", requeueAfterEnv: "10m", want: 10 * time.Minute},
{name: "LessThanMin", requeueAfterEnv: "1ms", want: DefaultRequeueAfterSeconds},
{name: "MoreThanMax", requeueAfterEnv: "8761h", want: DefaultRequeueAfterSeconds},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Setenv("ARGOCD_APPLICATIONSET_CONTROLLER_REQUEUE_AFTER", tt.requeueAfterEnv)
assert.Equalf(t, tt.want, getDefaultRequeueAfter(), "getDefaultRequeueAfter()")
})
}
}

View File

@@ -578,8 +578,8 @@ func TestInterpolatedMatrixGenerate(t *testing.T) {
},
},
expected: []map[string]interface{}{
{"path": "examples/git-generator-files-discovery/cluster-config/dev/config.json", "path.basename": "dev", "path.basenameNormalized": "dev", "name": "dev-01", "nameNormalized": "dev-01", "server": "https://dev-01.example.com", "metadata.labels.environment": "dev", "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "project": ""},
{"path": "examples/git-generator-files-discovery/cluster-config/prod/config.json", "path.basename": "prod", "path.basenameNormalized": "prod", "name": "prod-01", "nameNormalized": "prod-01", "server": "https://prod-01.example.com", "metadata.labels.environment": "prod", "metadata.labels.argocd.argoproj.io/secret-type": "cluster", "project": ""},
{"path": "examples/git-generator-files-discovery/cluster-config/dev/config.json", "path.basename": "dev", "path.basenameNormalized": "dev", "name": "dev-01", "nameNormalized": "dev-01", "server": "https://dev-01.example.com", "metadata.labels.environment": "dev", "metadata.labels.argocd.argoproj.io/secret-type": "cluster"},
{"path": "examples/git-generator-files-discovery/cluster-config/prod/config.json", "path.basename": "prod", "path.basenameNormalized": "prod", "name": "prod-01", "nameNormalized": "prod-01", "server": "https://prod-01.example.com", "metadata.labels.environment": "prod", "metadata.labels.argocd.argoproj.io/secret-type": "cluster"},
},
clientError: false,
},
@@ -734,7 +734,6 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
"name": "dev-01",
"nameNormalized": "dev-01",
"server": "https://dev-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"environment": "dev",
@@ -751,7 +750,6 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
"name": "prod-01",
"nameNormalized": "prod-01",
"server": "https://prod-01.example.com",
"project": "",
"metadata": map[string]interface{}{
"labels": map[string]string{
"environment": "prod",

View File

@@ -197,7 +197,6 @@ func TestMergeGenerate(t *testing.T) {
}
func toAPIExtensionsJSON(t *testing.T, g interface{}) *apiextensionsv1.JSON {
t.Helper()
resVal, err := json.Marshal(g)
if err != nil {
t.Error("unable to unmarshal json", g)

View File

@@ -139,7 +139,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
return nil, fmt.Errorf("error fetching CA certificates from ConfigMap: %w", prErr)
}
}
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -147,7 +147,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.Gitea != nil {
providerConfig := generatorConfig.Gitea
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -164,13 +164,13 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
}
if providerConfig.BearerToken != nil {
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
}
return pullrequest.NewBitbucketServiceBearerToken(ctx, appToken, providerConfig.API, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
} else if providerConfig.BasicAuth != nil {
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -182,13 +182,13 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
if generatorConfig.Bitbucket != nil {
providerConfig := generatorConfig.Bitbucket
if providerConfig.BearerToken != nil {
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
}
return pullrequest.NewBitbucketCloudServiceBearerToken(providerConfig.API, appToken, providerConfig.Owner, providerConfig.Repo)
} else if providerConfig.BasicAuth != nil {
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -199,7 +199,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.AzureDevOps != nil {
providerConfig := generatorConfig.AzureDevOps
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -219,7 +219,7 @@ func (g *PullRequestGenerator) github(ctx context.Context, cfg *argoprojiov1alph
}
// always default to token, even if not set (public access)
token, err := utils.GetSecretRef(ctx, g.client, cfg.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, cfg.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}

View File

@@ -283,7 +283,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
"gitea.myorg.com",
"bitbucket.myorg.com",
"azuredevops.myorg.com",
}, true, nil, true))
}, true, nil))
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
@@ -306,7 +306,7 @@ func TestAllowedSCMProviderPullRequest(t *testing.T) {
}
func TestSCMProviderDisabled_PRGenerator(t *testing.T) {
generator := NewPullRequestGenerator(nil, NewSCMConfig("", []string{}, false, nil, true))
generator := NewPullRequestGenerator(nil, NewSCMConfig("", []string{}, false, nil))
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{

View File

@@ -35,16 +35,14 @@ type SCMConfig struct {
allowedSCMProviders []string
enableSCMProviders bool
GitHubApps github_app_auth.Credentials
tokenRefStrictMode bool
}
func NewSCMConfig(scmRootCAPath string, allowedSCMProviders []string, enableSCMProviders bool, gitHubApps github_app_auth.Credentials, tokenRefStrictMode bool) SCMConfig {
func NewSCMConfig(scmRootCAPath string, allowedSCMProviders []string, enableSCMProviders bool, gitHubApps github_app_auth.Credentials) SCMConfig {
return SCMConfig{
scmRootCAPath: scmRootCAPath,
allowedSCMProviders: allowedSCMProviders,
enableSCMProviders: enableSCMProviders,
GitHubApps: gitHubApps,
tokenRefStrictMode: tokenRefStrictMode,
}
}
@@ -156,7 +154,7 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error fetching CA certificates from ConfigMap: %w", scmError)
}
}
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Gitlab token: %w", err)
}
@@ -165,7 +163,7 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Gitlab service: %w", err)
}
} else if providerConfig.Gitea != nil {
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Gitea token: %w", err)
}
@@ -184,13 +182,13 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
}
}
if providerConfig.BearerToken != nil {
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
appToken, err := utils.GetSecretRef(ctx, g.client, providerConfig.BearerToken.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
}
provider, scmError = scm_provider.NewBitbucketServerProviderBearerToken(ctx, appToken, providerConfig.API, providerConfig.Project, providerConfig.AllBranches, g.scmRootCAPath, providerConfig.Insecure, caCerts)
} else if providerConfig.BasicAuth != nil {
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %w", err)
}
@@ -202,7 +200,7 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Bitbucket Server service: %w", scmError)
}
} else if providerConfig.AzureDevOps != nil {
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Azure Devops access token: %w", err)
}
@@ -211,7 +209,7 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
return nil, fmt.Errorf("error initializing Azure Devops service: %w", err)
}
} else if providerConfig.Bitbucket != nil {
appPassword, err := utils.GetSecretRef(ctx, g.client, providerConfig.Bitbucket.AppPasswordRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
appPassword, err := utils.GetSecretRef(ctx, g.client, providerConfig.Bitbucket.AppPasswordRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Bitbucket cloud appPassword: %w", err)
}
@@ -285,7 +283,7 @@ func (g *SCMProviderGenerator) githubProvider(ctx context.Context, github *argop
)
}
token, err := utils.GetSecretRef(ctx, g.client, github.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
token, err := utils.GetSecretRef(ctx, g.client, github.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Github token: %w", err)
}

View File

@@ -178,7 +178,7 @@ func TestApplicationsetCollector(t *testing.T) {
appsetCollector := newAppsetCollector(utils.NewAppsetLister(client), collectedLabels, filter)
metrics.Registry.MustRegister(appsetCollector)
req, err := http.NewRequest(http.MethodGet, "/metrics", nil)
req, err := http.NewRequest("GET", "/metrics", nil)
require.NoError(t, err)
rr := httptest.NewRecorder()
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{})
@@ -220,7 +220,7 @@ func TestObserveReconcile(t *testing.T) {
appsetMetrics := NewApplicationsetMetrics(utils.NewAppsetLister(client), collectedLabels, filter)
req, err := http.NewRequest(http.MethodGet, "/metrics", nil)
req, err := http.NewRequest("GET", "/metrics", nil)
require.NoError(t, err)
rr := httptest.NewRecorder()
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{})

View File

@@ -134,7 +134,7 @@ func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*htt
// CheckResponse checks the API response for errors, and returns them if present.
func CheckResponse(resp *http.Response) error {
if c := resp.StatusCode; http.StatusOK <= c && c < http.StatusMultipleChoices {
if c := resp.StatusCode; 200 <= c && c <= 299 {
return nil
}
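
Editor's note: both sides of this hunk accept any 2xx response; the reverted line spelled the bounds with `net/http` status constants instead of numeric literals. A tiny self-contained version of that check, wrapped in a hypothetical `checkStatus` helper:

```go
package main

import (
	"fmt"
	"net/http"
)

// checkStatus mirrors the constant-based range test: any status in the
// 2xx class is treated as success, everything else as an error.
func checkStatus(code int) error {
	if code >= http.StatusOK && code < http.StatusMultipleChoices {
		return nil
	}
	return fmt.Errorf("API error with status code %d", code)
}

func main() {
	fmt.Println(checkStatus(http.StatusOK))           // <nil>
	fmt.Println(checkStatus(http.StatusUnauthorized)) // API error with status code 401
}
```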

View File

@@ -77,7 +77,7 @@ func TestClientDo(t *testing.T) {
"key3": float64(123),
},
},
expectedCode: http.StatusOK,
expectedCode: 200,
expectedError: nil,
},
{
@@ -109,7 +109,7 @@ func TestClientDo(t *testing.T) {
})),
clientOptionFns: nil,
expected: []map[string]interface{}(nil),
expectedCode: http.StatusUnauthorized,
expectedCode: 401,
expectedError: fmt.Errorf("API error with status code 401: "),
},
} {

View File

@@ -82,7 +82,6 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
pr.Repository.Name == nil ||
pr.PullRequestId == nil ||
pr.SourceRefName == nil ||
pr.TargetRefName == nil ||
pr.LastMergeSourceCommit == nil ||
pr.LastMergeSourceCommit.CommitId == nil {
continue
@@ -95,13 +94,12 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
if *pr.Repository.Name == a.repo {
pullRequests = append(pullRequests, &PullRequest{
Number: *pr.PullRequestId,
Title: *pr.Title,
Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
TargetBranch: strings.Replace(*pr.TargetRefName, "refs/heads/", "", 1),
HeadSHA: *pr.LastMergeSourceCommit.CommitId,
Labels: azureDevOpsLabels,
Author: strings.Split(*pr.CreatedBy.UniqueName, "@")[0], // Get the part before the @ in the email-address
Number: *pr.PullRequestId,
Title: *pr.Title,
Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
HeadSHA: *pr.LastMergeSourceCommit.CommitId,
Labels: azureDevOpsLabels,
Author: strings.Split(*pr.CreatedBy.UniqueName, "@")[0], // Get the part before the @ in the email-address
})
}
}
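
Editor's note: the removed lines carried a pull request's target branch through to the generator output by trimming the `refs/heads/` prefix from `TargetRefName`. A minimal sketch of that ref-to-branch conversion, detached from the Azure DevOps client types:

```go
package main

import (
	"fmt"
	"strings"
)

// branchFromRef converts a fully-qualified Git ref such as
// "refs/heads/main" into the short branch name used in generator params.
func branchFromRef(ref string) string {
	return strings.Replace(ref, "refs/heads/", "", 1)
}

func main() {
	fmt.Println(branchFromRef("refs/heads/feature-branch")) // feature-branch
	fmt.Println(branchFromRef("refs/heads/main"))           // main
}
```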

View File

@@ -72,7 +72,6 @@ func TestListPullRequest(t *testing.T) {
PullRequestId: createIntPtr(pr_id),
Title: createStringPtr(pr_title),
SourceRefName: createStringPtr("refs/heads/feature-branch"),
TargetRefName: createStringPtr("refs/heads/main"),
LastMergeSourceCommit: &git.GitCommitRef{
CommitId: createStringPtr(pr_head_sha),
},
@@ -107,7 +106,6 @@ func TestListPullRequest(t *testing.T) {
require.NoError(t, err)
assert.Len(t, list, 1)
assert.Equal(t, "feature-branch", list[0].Branch)
assert.Equal(t, "main", list[0].TargetBranch)
assert.Equal(t, pr_head_sha, list[0].HeadSHA)
assert.Equal(t, "feat(123)", list[0].Title)
assert.Equal(t, pr_id, list[0].Number)

View File

@@ -15,7 +15,6 @@ import (
)
func defaultHandlerCloud(t *testing.T) func(http.ResponseWriter, *http.Request) {
t.Helper()
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
var err error
@@ -234,7 +233,7 @@ func TestListPullRequestPaginationCloud(t *testing.T) {
func TestListResponseErrorCloud(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.WriteHeader(500)
}))
defer ts.Close()
svc, _ := NewBitbucketCloudServiceNoAuth(ts.URL, "OWNER", "REPO")

View File

@@ -16,7 +16,6 @@ import (
)
func defaultHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
t.Helper()
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
var err error

View File

@@ -14,7 +14,6 @@ import (
)
func giteaMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
t.Helper()
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
fmt.Println(r.RequestURI)

View File

@@ -15,7 +15,6 @@ import (
)
func writeMRListResponse(t *testing.T, w io.Writer) {
t.Helper()
f, err := os.Open("fixtures/gitlab_mr_list_response.json")
if err != nil {
t.Fatalf("error opening fixture file: %v", err)

View File

@@ -129,7 +129,7 @@ func (b *BitbucketServerProvider) RepoHasPath(_ context.Context, repo *Repositor
}
// No need to query for all pages here
response, err := b.client.DefaultApi.GetContent_0(repo.Organization, repo.Repository, path, opts)
if response != nil && response.StatusCode == http.StatusNotFound {
if response != nil && response.StatusCode == 404 {
// File/directory not found
return false, nil
}
@@ -203,7 +203,7 @@ func (b *BitbucketServerProvider) getDefaultBranch(org string, repo string) (*bi
response, err := b.client.DefaultApi.GetDefaultBranch(org, repo)
// The API will return 404 if a default branch is set but doesn't exist. In case the repo is empty and default branch is unset,
// we will get an EOF and a nil response.
if (response != nil && response.StatusCode == http.StatusNotFound) || (response == nil && err != nil && errors.Is(err, io.EOF)) {
if (response != nil && response.StatusCode == 404) || (response == nil && err != nil && errors.Is(err, io.EOF)) {
return nil, nil
}
if err != nil {

View File

@@ -14,7 +14,6 @@ import (
)
func defaultHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
t.Helper()
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
var err error
@@ -83,7 +82,6 @@ func defaultHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
}
func verifyDefaultRepo(t *testing.T, err error, repos []*Repository) {
t.Helper()
require.NoError(t, err)
assert.Len(t, repos, 1)
assert.Equal(t, Repository{

View File

@@ -128,7 +128,7 @@ func (g *GiteaProvider) ListRepos(ctx context.Context, cloneProtocol string) ([]
func (g *GiteaProvider) RepoHasPath(ctx context.Context, repo *Repository, path string) (bool, error) {
_, resp, err := g.client.GetContents(repo.Organization, repo.Repository, repo.Branch, path)
if resp != nil && resp.StatusCode == http.StatusNotFound {
if resp != nil && resp.StatusCode == 404 {
return false, nil
}
if fmt.Sprint(err) == "expect file, got directory" {

View File

@@ -15,7 +15,6 @@ import (
)
func giteaMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
t.Helper()
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
switch r.RequestURI {

View File

@@ -107,7 +107,7 @@ func (g *GithubProvider) RepoHasPath(ctx context.Context, repo *Repository, path
Ref: repo.Branch,
})
// 404s are not an error here, just a normal false.
if resp != nil && resp.StatusCode == http.StatusNotFound {
if resp != nil && resp.StatusCode == 404 {
return false, nil
}
if err != nil {

View File

@@ -14,7 +14,6 @@ import (
)
func githubMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
t.Helper()
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
switch r.RequestURI {

View File

@@ -17,7 +17,6 @@ import (
)
func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
t.Helper()
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
switch r.RequestURI {

View File

@@ -30,7 +30,7 @@ func Test_secretToCluster(t *testing.T) {
Data: map[string][]byte{
"name": []byte("test"),
"server": []byte("http://mycluster"),
"config": []byte("{\"username\":\"foo\", \"disableCompression\":true}"),
"config": []byte("{\"username\":\"foo\"}"),
},
}
cluster, err := secretToCluster(secret)
@@ -39,8 +39,7 @@ func Test_secretToCluster(t *testing.T) {
Name: "test",
Server: "http://mycluster",
Config: argoappv1.ClusterConfig{
Username: "foo",
DisableCompression: true,
Username: "foo",
},
}, *cluster)
}

View File

@@ -4,18 +4,14 @@ import (
"context"
"fmt"
"github.com/argoproj/argo-cd/v2/common"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)
var ErrDisallowedSecretAccess = fmt.Errorf("secret must have label %q=%q", common.LabelKeySecretType, common.LabelValueSecretTypeSCMCreds)
// getSecretRef gets the value of the key for the specified Secret resource.
func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov1alpha1.SecretRef, namespace string, tokenRefStrictMode bool) (string, error) {
func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov1alpha1.SecretRef, namespace string) (string, error) {
if ref == nil {
return "", nil
}
@@ -31,11 +27,6 @@ func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov
if err != nil {
return "", fmt.Errorf("error fetching secret %s/%s: %w", namespace, ref.SecretName, err)
}
if tokenRefStrictMode && secret.GetLabels()[common.LabelKeySecretType] != common.LabelValueSecretTypeSCMCreds {
return "", fmt.Errorf("secret %s/%s is not a valid SCM creds secret: %w", namespace, ref.SecretName, ErrDisallowedSecretAccess)
}
tokenBytes, ok := secret.Data[ref.Key]
if !ok {
return "", fmt.Errorf("key %q in secret %s/%s not found", ref.Key, namespace, ref.SecretName)

View File

@@ -67,7 +67,7 @@ func TestGetSecretRef(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
token, err := GetSecretRef(ctx, client, c.ref, c.namespace, false)
token, err := GetSecretRef(ctx, client, c.ref, c.namespace)
if c.hasError {
require.Error(t, err)
} else {

assets/swagger.json generated
View File

@@ -4557,9 +4557,6 @@
"namespace": {
"type": "string"
},
"requiresDeletionConfirmation": {
"type": "boolean"
},
"requiresPruning": {
"type": "boolean"
},
@@ -6620,10 +6617,6 @@
"type": "boolean",
"title": "SkipCrds skips custom resource definition installation step (Helm's --skip-crds)"
},
"skipTests": {
"description": "SkipTests skips test manifest installation step (Helm's --skip-tests).",
"type": "boolean"
},
"valueFiles": {
"type": "array",
"title": "ValuesFiles is a list of Helm value files to use when generating a template",
@@ -7147,20 +7140,12 @@
"description": "Server requires Bearer authentication. This client will not attempt to use\nrefresh tokens for an OAuth2 flow.\nTODO: demonstrate an OAuth2 compatible client.",
"type": "string"
},
"disableCompression": {
"description": "DisableCompression bypasses automatic GZip compression requests to the server.",
"type": "boolean"
},
"execProviderConfig": {
"$ref": "#/definitions/v1alpha1ExecProviderConfig"
},
"password": {
"type": "string"
},
"proxyUrl": {
"type": "string",
"title": "ProxyURL is the URL to the proxy to be used for all requests send to the server"
},
"tlsClientConfig": {
"$ref": "#/definitions/v1alpha1TLSClientConfig"
},
@@ -7174,10 +7159,6 @@
"description": "ClusterGenerator defines a generator to match against clusters registered with ArgoCD.",
"type": "object",
"properties": {
"flatList": {
"type": "boolean",
"title": "returns the clusters a single 'clusters' value in the template"
},
"selector": {
"$ref": "#/definitions/v1LabelSelector"
},

View File

@@ -25,7 +25,6 @@ import (
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
@@ -83,9 +82,6 @@ func NewCommand() *cobra.Command {
enableDynamicClusterDistribution bool
serverSideDiff bool
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
// argocd k8s event logging flag
enableK8sEvent []string
)
command := cobra.Command{
Use: cliName,
@@ -194,7 +190,6 @@ func NewCommand() *cobra.Command {
serverSideDiff,
enableDynamicClusterDistribution,
ignoreNormalizerOpts,
enableK8sEvent,
)
errors.CheckError(err)
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
@@ -272,9 +267,6 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
command.Flags().BoolVar(&serverSideDiff, "server-side-diff-enabled", env.ParseBoolFromEnv(common.EnvServerSideDiff, false), "Feature flag to enable ServerSide diff. Default (\"false\")")
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout-seconds", env.ParseDurationFromEnv("ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT", 0*time.Second, 0, math.MaxInt64), "Set ignore normalizer JQ execution timeout")
// argocd k8s event logging flag
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, cacheutil.Options{
OnClientCreated: func(client *redis.Client) {
redisClient = client

View File

@@ -38,6 +38,7 @@ import (
appsetmetrics "github.com/argoproj/argo-cd/v2/applicationset/metrics"
"github.com/argoproj/argo-cd/v2/applicationset/services"
appv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/errors"
@@ -72,7 +73,6 @@ func NewCommand() *cobra.Command {
metricsAplicationsetLabels []string
enableScmProviders bool
webhookParallelism int
tokenRefStrictMode bool
)
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
@@ -162,9 +162,10 @@ func NewCommand() *cobra.Command {
errors.CheckError(err)
argoSettingsMgr := argosettings.NewSettingsManager(ctx, k8sClient, namespace)
appSetConfig := appclientset.NewForConfigOrDie(mgr.GetConfig())
argoCDDB := db.NewDB(namespace, argoSettingsMgr, k8sClient)
scmConfig := generators.NewSCMConfig(scmRootCAPath, allowedScmProviders, enableScmProviders, github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)), tokenRefStrictMode)
scmConfig := generators.NewSCMConfig(scmRootCAPath, allowedScmProviders, enableScmProviders, github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)))
tlsConfig := apiclient.TLSConfiguration{
DisableTLS: repoServerPlaintext,
@@ -210,6 +211,7 @@ func NewCommand() *cobra.Command {
Renderer: &utils.Render{},
Policy: policyObj,
EnablePolicyOverride: enablePolicyOverride,
ArgoAppClientset: appSetConfig,
KubeClientset: k8sClient,
ArgoDB: argoCDDB,
ArgoCDNamespace: namespace,
@@ -250,7 +252,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&allowedScmProviders, "allowed-scm-providers", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS", []string{}, ","), "The list of allowed custom SCM provider API URLs. This restriction does not apply to SCM or PR generators which do not accept a custom API URL. (Default: Empty = all)")
command.Flags().BoolVar(&enableScmProviders, "enable-scm-providers", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_SCM_PROVIDERS", true), "Enable retrieving information from SCM providers, used by the SCM and PR generators (Default: true)")
command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode")
command.Flags().BoolVar(&tokenRefStrictMode, "token-ref-strict-mode", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_TOKENREF_STRICT_MODE", false), fmt.Sprintf("Set to true to require secrets referenced by SCM providers to have the %s=%s label set (Default: false)", common.LabelKeySecretType, common.LabelValueSecretTypeSCMCreds))
command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.")
command.Flags().BoolVar(&enableNewGitFileGlobbing, "enable-new-git-file-globbing", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING", false), "Enable new globbing in Git files generator.")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")

View File

@@ -75,7 +75,6 @@ func NewCommand() *cobra.Command {
helmRegistryMaxIndexSize string
disableManifestMaxExtractedSize bool
includeHiddenDirectories bool
cmpUseManifestGeneratePaths bool
)
command := cobra.Command{
Use: cliName,
@@ -137,7 +136,6 @@ func NewCommand() *cobra.Command {
HelmManifestMaxExtractedSize: helmManifestMaxExtractedSizeQuantity.ToDec().Value(),
HelmRegistryMaxIndexSize: helmRegistryMaxIndexSizeQuantity.ToDec().Value(),
IncludeHiddenDirectories: includeHiddenDirectories,
CMPUseManifestGeneratePaths: cmpUseManifestGeneratePaths,
}, askPassServer)
errors.CheckError(err)
@@ -243,7 +241,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&helmRegistryMaxIndexSize, "helm-registry-max-index-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_INDEX_SIZE", "1G"), "Maximum size of registry index file")
command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted")
command.Flags().BoolVar(&includeHiddenDirectories, "include-hidden-directories", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_INCLUDE_HIDDEN_DIRECTORIES", false), "Include hidden directories from Git")
command.Flags().BoolVar(&cmpUseManifestGeneratePaths, "plugin-use-manifest-generate-paths", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_PLUGIN_USE_MANIFEST_GENERATE_PATHS", false), "Pass the resources described in argocd.argoproj.io/manifest-generate-paths value to the cmpserver to generate the application manifests.")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, cacheutil.Options{
OnClientCreated: func(client *redis.Client) {

View File

@@ -27,7 +27,6 @@ import (
reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache"
"github.com/argoproj/argo-cd/v2/server"
servercache "github.com/argoproj/argo-cd/v2/server/cache"
"github.com/argoproj/argo-cd/v2/util/argo"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/dex"
@@ -92,9 +91,6 @@ func NewCommand() *cobra.Command {
scmRootCAPath string
allowedScmProviders []string
enableScmProviders bool
// argocd k8s event logging flag
enableK8sEvent []string
)
command := &cobra.Command{
Use: cliName,
@@ -233,7 +229,6 @@ func NewCommand() *cobra.Command {
ApplicationNamespaces: applicationNamespaces,
EnableProxyExtension: enableProxyExtension,
WebhookParallelism: webhookParallelism,
EnableK8sEvent: enableK8sEvent,
}
appsetOpts := server.ApplicationSetOpts{
@@ -308,7 +303,6 @@ func NewCommand() *cobra.Command {
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_SERVER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
// Flags related to the applicationSet component.
command.Flags().StringVar(&scmRootCAPath, "appset-scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")

View File

@@ -188,12 +188,12 @@ func NewDiffReconcileResults() *cobra.Command {
func toUnstructured(val interface{}) (*unstructured.Unstructured, error) {
data, err := json.Marshal(val)
if err != nil {
return nil, fmt.Errorf("error while marhsalling value: %w", err)
return nil, err
}
res := make(map[string]interface{})
err = json.Unmarshal(data, &res)
if err != nil {
return nil, fmt.Errorf("error while unmarhsalling data: %w", err)
return nil, err
}
return &unstructured.Unstructured{Object: res}, nil
}
@@ -227,7 +227,7 @@ func diffReconcileResults(res1 reconcileResults, res2 reconcileResults) error {
for k, v := range resMap2 {
secondUn, err := toUnstructured(v)
if err != nil {
return fmt.Errorf("error converting second resource of second map to unstructure: %w", err)
return err
}
pairs = append(pairs, diffPair{name: k, first: nil, second: secondUn})
}
@@ -338,7 +338,7 @@ func saveToFile(err error, outputFormat string, result reconcileResults, outputP
func getReconcileResults(ctx context.Context, appClientset appclientset.Interface, namespace string, selector string) ([]appReconcileResult, error) {
appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
if err != nil {
return nil, fmt.Errorf("error listing namespaced apps: %w", err)
return nil, err
}
var items []appReconcileResult
@@ -389,11 +389,11 @@ func reconcileApplications(
return nil
}, []string{}, []string{})
if err != nil {
return nil, fmt.Errorf("error starting new metrics server: %w", err)
return nil, err
}
stateCache := createLiveStateCache(argoDB, appInformer, settingsMgr, server)
if err := stateCache.Init(); err != nil {
return nil, fmt.Errorf("error initializing state cache: %w", err)
return nil, err
}
cache := appstatecache.NewCache(
@@ -406,7 +406,7 @@ func reconcileApplications(
appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
if err != nil {
return nil, fmt.Errorf("error listing namespaced apps: %w", err)
return nil, err
}
sort.Slice(appsList.Items, func(i, j int) bool {
@@ -429,7 +429,7 @@ func reconcileApplications(
proj, err := projLister.AppProjects(namespace).Get(app.Spec.Project)
if err != nil {
return nil, fmt.Errorf("error getting namespaced project: %w", err)
return nil, err
}
sources := make([]v1alpha1.ApplicationSource, 0)
@@ -439,7 +439,7 @@ func reconcileApplications(
res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false, false)
if err != nil {
return nil, fmt.Errorf("error comparing app states: %w", err)
return nil, err
}
items = append(items, appReconcileResult{
Name: app.Name,
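The hunks in this file toggle between bare `return nil, err` and errors wrapped with fmt.Errorf and the %w verb. For reference, here is a minimal, self-contained sketch of what wrapping buys the caller; the listNamespacedApps helper and its error are hypothetical, purely for illustration:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for an error returned by a lower layer.
var errNotFound = errors.New("not found")

// listNamespacedApps is a hypothetical helper that fails and wraps the cause.
func listNamespacedApps(namespace string) error {
	// Pretend the underlying client call failed.
	err := errNotFound
	return fmt.Errorf("error listing namespaced apps in %q: %w", namespace, err)
}

func main() {
	err := listNamespacedApps("argocd")
	fmt.Println(err)                         // error listing namespaced apps in "argocd": not found
	fmt.Println(errors.Is(err, errNotFound)) // true: the %w verb keeps the cause matchable
}

With a plain `return nil, err` the message context is lost; with %w the message gains context while errors.Is / errors.As still see the original cause.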

View File

@@ -16,12 +16,10 @@ import (
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/utils"
"github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/localconfig"
secutil "github.com/argoproj/argo-cd/v2/util/security"
)
@@ -139,7 +137,6 @@ func NewImportCommand() *cobra.Command {
verbose bool
stopOperation bool
ignoreTracking bool
promptsEnabled bool
applicationNamespaces []string
applicationsetNamespaces []string
)
@@ -311,8 +308,6 @@ func NewImportCommand() *cobra.Command {
}
}
promptUtil := utils.NewPrompt(promptsEnabled)
// Delete objects not in backup
for key, liveObj := range pruneObjects {
if prune {
@@ -340,19 +335,13 @@ func NewImportCommand() *cobra.Command {
log.Fatalf("Unexpected kind '%s' in prune list", key.Kind)
}
isForbidden := false
if !dryRun {
canPrune := promptUtil.Confirm(fmt.Sprintf("Are you sure you want to prune %s/%s %s ? [y/n]", key.Group, key.Kind, key.Name))
if canPrune {
err = dynClient.Delete(ctx, key.Name, v1.DeleteOptions{})
if apierr.IsForbidden(err) || apierr.IsNotFound(err) {
isForbidden = true
log.Warnf("%s/%s %s: %v\n", key.Group, key.Kind, key.Name, err)
} else {
errors.CheckError(err)
}
err = dynClient.Delete(ctx, key.Name, v1.DeleteOptions{})
if apierr.IsForbidden(err) || apierr.IsNotFound(err) {
isForbidden = true
log.Warnf("%s/%s %s: %v\n", key.Group, key.Kind, key.Name, err)
} else {
fmt.Printf("The command to prune %s/%s %s was cancelled.\n", key.Group, key.Kind, key.Name)
errors.CheckError(err)
}
}
if !isForbidden {
@@ -373,7 +362,6 @@ func NewImportCommand() *cobra.Command {
command.Flags().BoolVar(&stopOperation, "stop-operation", false, "Stop any existing operations")
command.Flags().StringSliceVarP(&applicationNamespaces, "application-namespaces", "", []string{}, fmt.Sprintf("Comma separated list of namespace globs to which import of applications is allowed. If not provided value from '%s' in %s will be used,if it's not defined only applications without an explicit namespace will be imported to the Argo CD namespace", applicationNamespacesCmdParamsKey, common.ArgoCDCmdParamsConfigMapName))
command.Flags().StringSliceVarP(&applicationsetNamespaces, "applicationset-namespaces", "", []string{}, fmt.Sprintf("Comma separated list of namespace globs which import of applicationsets is allowed. If not provided value from '%s' in %s will be used,if it's not defined only applicationsets without an explicit namespace will be imported to the Argo CD namespace", applicationsetNamespacesCmdParamsKey, common.ArgoCDCmdParamsConfigMapName))
command.PersistentFlags().BoolVar(&promptsEnabled, "force-prompts-enabled", localconfig.GetPromptsEnabled(true), "Force optional interactive prompts to be enabled or disabled, overriding local configuration. If not specified, the local configuration value will be used, which is false by default.")
return &command
}

View File

@@ -565,9 +565,7 @@ argocd admin cluster kubeconfig https://cluster-api-url:6443 /path/to/output/kub
cluster, err := db.NewDB(namespace, settings.NewSettingsManager(ctx, kubeclientset, namespace), kubeclientset).GetCluster(ctx, serverUrl)
errors.CheckError(err)
rawConfig, err := cluster.RawRestConfig()
errors.CheckError(err)
err = kube.WriteKubeConfig(rawConfig, namespace, output)
err = kube.WriteKubeConfig(cluster.RawRestConfig(), namespace, output)
errors.CheckError(err)
},
}

View File

@@ -45,14 +45,9 @@ func NewRedisInitialPasswordCommand() *cobra.Command {
namespace, _, err := clientConfig.Namespace()
errors.CheckError(err)
// redisInitialCredentials is the kubernetes secret containing
// the redis password
redisInitialCredentials := common.RedisInitialCredentials
// redisInitialCredentialsKey is the key in the redisInitialCredentials
// secret which maps to the redis password
redisInitialCredentialsKey := common.RedisInitialCredentialsKey
fmt.Printf("Checking for initial Redis password in secret %s/%s at key %s. \n", namespace, redisInitialCredentials, redisInitialCredentialsKey)
redisInitialPasswordSecretName := common.DefaultRedisInitialPasswordSecretName
redisInitialPasswordKey := common.DefaultRedisInitialPasswordKey
fmt.Printf("Checking for initial Redis password in secret %s/%s at key %s. \n", namespace, redisInitialPasswordSecretName, redisInitialPasswordKey)
config, err := clientConfig.ClientConfig()
errors.CheckError(err)
@@ -64,11 +59,11 @@ func NewRedisInitialPasswordCommand() *cobra.Command {
errors.CheckError(err)
data := map[string][]byte{
redisInitialCredentialsKey: []byte(randomPassword),
redisInitialPasswordKey: []byte(randomPassword),
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: redisInitialCredentials,
Name: redisInitialPasswordSecretName,
Namespace: namespace,
},
Data: data,
@@ -79,14 +74,14 @@ func NewRedisInitialPasswordCommand() *cobra.Command {
errors.CheckError(err)
}
fmt.Printf("Argo CD Redis secret state confirmed: secret name %s.\n", redisInitialCredentials)
secret, err = kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), redisInitialCredentials, v1.GetOptions{})
fmt.Println("Argo CD Redis secret state confirmed: secret name argocd-redis.")
secret, err = kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), redisInitialPasswordSecretName, v1.GetOptions{})
errors.CheckError(err)
if _, ok := secret.Data[redisInitialCredentialsKey]; ok {
if _, ok := secret.Data[redisInitialPasswordKey]; ok {
fmt.Println("Password secret is configured properly.")
} else {
err := fmt.Errorf("key %s doesn't exist in secret %s. \n", redisInitialCredentialsKey, redisInitialCredentials)
err := fmt.Errorf("key %s doesn't exist in secret %s. \n", redisInitialPasswordKey, redisInitialPasswordSecretName)
errors.CheckError(err)
}
},
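The redis-initial-password hunks above create an argocd-redis Secret holding a generated password under the auth key and then read it back to confirm the key exists. A minimal sketch of that create/read round trip against a fake clientset; the password value is a placeholder and the existing-secret check from the real command is omitted:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.Background()
	kubeClientset := fake.NewSimpleClientset()

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "argocd-redis", Namespace: "argocd"},
		Data:       map[string][]byte{"auth": []byte("placeholder-password")},
	}

	// Create the secret, then read it back and check the expected key, as the command does.
	if _, err := kubeClientset.CoreV1().Secrets("argocd").Create(ctx, secret, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	got, err := kubeClientset.CoreV1().Secrets("argocd").Get(ctx, "argocd-redis", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if _, ok := got.Data["auth"]; ok {
		fmt.Println("Password secret is configured properly.")
	}
}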

View File

@@ -579,7 +579,7 @@ func NewResourceActionRunCommand(cmdCtx commandContext) *cobra.Command {
Short: "Executes resource action",
Long: "Executes resource action using the lua script configured in the 'resource.customizations' field of 'argocd-cm' ConfigMap and outputs updated fields",
Example: `
argocd admin settings resource-overrides action /tmp/deploy.yaml restart --argocd-cm-path ./argocd-cm.yaml`,
argocd admin settings resource-overrides action run /tmp/deploy.yaml restart --argocd-cm-path ./argocd-cm.yaml`,
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()

View File

@@ -200,7 +200,8 @@ admissionregistration.k8s.io/MutatingWebhookConfiguration:
require.NoError(t, err)
assert.Contains(t, summary, tc.containsSummary)
} else if tc.containsError != "" {
assert.ErrorContains(t, err, tc.containsError)
require.Error(t, err)
assert.Contains(t, err.Error(), tc.containsError)
}
})
}
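The test hunk above switches between assert.ErrorContains and the require.Error / assert.Contains pair. Both spellings express the same check; a minimal sketch with a hypothetical failing function:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// doImport is a hypothetical function that always fails, for the sake of the example.
func doImport() error { return errors.New("certificate signed by unknown authority") }

func TestImportError(t *testing.T) {
	err := doImport()

	// One-liner: asserts err is non-nil and that its message contains the substring.
	assert.ErrorContains(t, err, "unknown authority")

	// Equivalent two-step form: stop the test if err is nil, then check the message.
	require.Error(t, err)
	assert.Contains(t, err.Error(), "unknown authority")
}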

View File

@@ -27,7 +27,6 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
k8swatch "k8s.io/apimachinery/pkg/watch"
@@ -99,7 +98,6 @@ func NewApplicationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
command.AddCommand(NewApplicationLogsCommand(clientOpts))
command.AddCommand(NewApplicationAddSourceCommand(clientOpts))
command.AddCommand(NewApplicationRemoveSourceCommand(clientOpts))
command.AddCommand(NewApplicationConfirmDeletionCommand(clientOpts))
return command
}
@@ -1083,18 +1081,17 @@ func getLocalObjectsString(ctx context.Context, app *argoappv1.Application, proj
) []string {
source := app.Spec.GetSource()
res, err := repository.GenerateManifests(ctx, local, localRepoRoot, source.TargetRevision, &repoapiclient.ManifestRequest{
Repo: &argoappv1.Repository{Repo: source.RepoURL},
AppLabelKey: appLabelKey,
AppName: app.Name,
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &source,
KustomizeOptions: kustomizeOptions,
KubeVersion: kubeVersion,
ApiVersions: apiVersions,
TrackingMethod: trackingMethod,
ProjectName: proj.Name,
ProjectSourceRepos: proj.Spec.SourceRepos,
AnnotationManifestGeneratePaths: app.GetAnnotation(argoappv1.AnnotationKeyManifestGeneratePaths),
Repo: &argoappv1.Repository{Repo: source.RepoURL},
AppLabelKey: appLabelKey,
AppName: app.Name,
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &source,
KustomizeOptions: kustomizeOptions,
KubeVersion: kubeVersion,
ApiVersions: apiVersions,
TrackingMethod: trackingMethod,
ProjectName: proj.Name,
ProjectSourceRepos: proj.Spec.SourceRepos,
}, true, &git.NoopCredsStore{}, resource.MustParse("0"), nil)
errors.CheckError(err)
@@ -1143,7 +1140,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
refresh bool
hardRefresh bool
exitCode bool
diffExitCode int
local string
revision string
localRepoRoot string
@@ -1246,14 +1242,13 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
proj := getProject(c, clientOpts, ctx, app.Spec.Project)
foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
if foundDiffs && exitCode {
os.Exit(diffExitCode)
os.Exit(1)
}
},
}
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff. May also return non-zero exit code if there is an error.")
command.Flags().IntVar(&diffExitCode, "diff-exit-code", 1, "Return specified exit code when there is a diff. Typical error code is 20.")
command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff")
command.Flags().StringVar(&local, "local", "", "Compare live app to a local manifests")
command.Flags().StringVar(&revision, "revision", "", "Compare live app to a particular revision")
command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
@@ -3204,50 +3199,3 @@ func NewApplicationRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *
command.Flags().IntVar(&sourcePosition, "source-position", -1, "Position of the source from the list of sources of the app. Counting starts at 1.")
return command
}
func NewApplicationConfirmDeletionCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var appNamespace string
command := &cobra.Command{
Use: "confirm-deletion APPNAME",
Short: "Confirms deletion/pruning of an application resources",
Run: func(c *cobra.Command, args []string) {
ctx := c.Context()
if len(args) != 1 {
c.HelpFunc()(c, args)
os.Exit(1)
}
argocdClient := headless.NewClientOrDie(clientOpts, c)
conn, appIf := argocdClient.NewApplicationClientOrDie()
defer argoio.Close(conn)
appName, appNs := argo.ParseFromQualifiedName(args[0], appNamespace)
app, err := appIf.Get(ctx, &application.ApplicationQuery{
Name: &appName,
Refresh: getRefreshType(false, false),
AppNamespace: &appNs,
})
errors.CheckError(err)
annotations := app.Annotations
if annotations == nil {
annotations = map[string]string{}
app.Annotations = annotations
}
annotations[common.AnnotationDeletionApproved] = metav1.Now().Format(time.RFC3339)
_, err = appIf.Update(ctx, &application.ApplicationUpdateRequest{
Application: app,
Validate: ptr.To(false),
Project: &app.Spec.Project,
})
errors.CheckError(err)
fmt.Printf("Application '%s' updated successfully\n", app.ObjectMeta.Name)
},
}
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Namespace of the target application where the source will be appended")
return command
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/utils"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
certificatepkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/certificate"
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
@@ -237,26 +236,19 @@ func NewCertRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
err := fmt.Errorf("A single wildcard is not allowed as REPOSERVER name.")
errors.CheckError(err)
}
promptUtil := utils.NewPrompt(clientOpts.PromptsEnabled)
canDelete := promptUtil.Confirm(fmt.Sprintf("Are you sure you want to remove all certificates for '%s'? [y/n]", hostNamePattern))
if canDelete {
certQuery = certificatepkg.RepositoryCertificateQuery{
HostNamePattern: hostNamePattern,
CertType: certType,
CertSubType: certSubType,
}
removed, err := certIf.DeleteCertificate(ctx, &certQuery)
errors.CheckError(err)
if len(removed.Items) > 0 {
for _, cert := range removed.Items {
fmt.Printf("Removed cert for '%s' of type '%s' (subtype '%s')\n", cert.ServerName, cert.CertType, cert.CertSubType)
}
} else {
fmt.Println("No certificates were removed (none matched the given pattern)")
certQuery = certificatepkg.RepositoryCertificateQuery{
HostNamePattern: hostNamePattern,
CertType: certType,
CertSubType: certSubType,
}
removed, err := certIf.DeleteCertificate(ctx, &certQuery)
errors.CheckError(err)
if len(removed.Items) > 0 {
for _, cert := range removed.Items {
fmt.Printf("Removed cert for '%s' of type '%s' (subtype '%s')\n", cert.ServerName, cert.CertType, cert.CertSubType)
}
} else {
fmt.Printf("The command to remove all certificates for '%s' was cancelled.\n", hostNamePattern)
fmt.Println("No certificates were removed (none matched the given patterns)")
}
},
}

View File

@@ -2,7 +2,6 @@ package commands
import (
"fmt"
"net/http"
"os"
"regexp"
"strings"
@@ -107,11 +106,6 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
contextName := args[0]
conf, err := getRestConfig(pathOpts, contextName)
errors.CheckError(err)
if clusterOpts.ProxyUrl != "" {
u, err := argoappv1.ParseProxyUrl(clusterOpts.ProxyUrl)
errors.CheckError(err)
conf.Proxy = http.ProxyURL(u)
}
clientset, err := kubernetes.NewForConfig(conf)
errors.CheckError(err)
managerBearerToken := ""
@@ -197,7 +191,6 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
command.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip explicit confirmation")
command.Flags().StringArrayVar(&labels, "label", nil, "Set metadata labels (e.g. --label key=value)")
command.Flags().StringArrayVar(&annotations, "annotation", nil, "Set metadata annotations (e.g. --annotation key=value)")
command.Flags().StringVar(&clusterOpts.ProxyUrl, "proxy-url", "", "use proxy to connect cluster")
cmdutil.AddClusterFlags(command, &clusterOpts)
return command
}
@@ -380,8 +373,6 @@ func printClusterDetails(clusters []argoappv1.Cluster) {
fmt.Printf(" Basic authentication: %v\n", cluster.Config.Username != "")
fmt.Printf(" oAuth authentication: %v\n", cluster.Config.BearerToken != "")
fmt.Printf(" AWS authentication: %v\n", cluster.Config.AWSAuthConfig != nil)
fmt.Printf("\nDisable compression: %v\n", cluster.Config.DisableCompression)
fmt.Printf("\nUse proxy: %v\n", cluster.Config.ProxyUrl != "")
fmt.Println()
}
}
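The cluster hunks above touch a proxy-url option wired through argoappv1.ParseProxyUrl and http.ProxyURL, plus a DisableCompression setting printed in the cluster details. A minimal sketch of how those two settings land on a client-go rest.Config; the endpoint and proxy address are placeholders:

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"k8s.io/client-go/rest"
)

func main() {
	conf := &rest.Config{Host: "https://example-cluster:6443"} // placeholder endpoint

	// Route all requests from this client through an HTTP proxy.
	u, err := url.Parse("http://proxy.internal:3128") // placeholder proxy URL
	if err != nil {
		panic(err)
	}
	conf.Proxy = http.ProxyURL(u)

	// Ask client-go not to request gzip-compressed responses from the API server.
	conf.DisableCompression = true

	fmt.Println(conf.Host, conf.DisableCompression)
}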

View File

@@ -32,12 +32,11 @@ func Test_printClusterTable(t *testing.T) {
Server: "my-server",
Name: "my-name",
Config: v1alpha1.ClusterConfig{
Username: "my-username",
Password: "my-password",
BearerToken: "my-bearer-token",
TLSClientConfig: v1alpha1.TLSClientConfig{},
AWSAuthConfig: nil,
DisableCompression: false,
Username: "my-username",
Password: "my-password",
BearerToken: "my-bearer-token",
TLSClientConfig: v1alpha1.TLSClientConfig{},
AWSAuthConfig: nil,
},
ConnectionState: v1alpha1.ConnectionState{
Status: "my-status",

View File

@@ -1,44 +0,0 @@
package commands
import (
"fmt"
"strconv"
"github.com/spf13/cobra"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/localconfig"
)
// NewConfigureCommand returns a new instance of an `argocd configure` command
func NewConfigureCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
var promptsEnabled bool
command := &cobra.Command{
Use: "configure",
Short: "Manage local configuration",
Example: `# Enable optional interactive prompts
argocd configure --prompts-enabled
argocd configure --prompts-enabled=true
# Disable optional interactive prompts
argocd configure --prompts-enabled=false`,
Run: func(c *cobra.Command, args []string) {
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
errors.CheckError(err)
localCfg.PromptsEnabled = promptsEnabled
err = localconfig.WriteLocalConfig(*localCfg, globalClientOpts.ConfigPath)
errors.CheckError(err)
fmt.Println("Successfully updated the following configuration settings:")
fmt.Printf("prompts-enabled: %v\n", strconv.FormatBool(localCfg.PromptsEnabled))
},
}
command.Flags().BoolVar(&promptsEnabled, "prompts-enabled", localconfig.GetPromptsEnabled(false), "Enable (or disable) optional interactive prompts")
return command
}

View File

@@ -1,97 +0,0 @@
package commands
import (
"os"
"testing"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/argoproj/argo-cd/v2/util/localconfig"
)
func TestNewConfigureCommand_PromptsEnabled_DefaultTrue(t *testing.T) {
// Write the test config file
err := os.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
require.NoError(t, err)
defer os.Remove(testConfigFilePath)
err = os.Chmod(testConfigFilePath, 0o600)
require.NoError(t, err, "Could not change the file permission to 0600 %v", err)
localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
require.NoError(t, err)
assert.False(t, localConfig.PromptsEnabled)
// Set `PromptsEnabled` to `true` using `argocd configure --prompts-enabled`
cmd := NewConfigureCommand(&argocdclient.ClientOptions{ConfigPath: testConfigFilePath})
cmd.SetArgs([]string{"--prompts-enabled"})
err = cmd.Execute()
require.NoError(t, err)
// Read the test config file
localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
require.NoError(t, err)
assert.True(t, localConfig.PromptsEnabled)
}
func TestNewConfigureCommand_PromptsEnabled_True(t *testing.T) {
// Write the test config file
err := os.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
require.NoError(t, err)
defer os.Remove(testConfigFilePath)
err = os.Chmod(testConfigFilePath, 0o600)
require.NoError(t, err, "Could not change the file permission to 0600 %v", err)
localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
require.NoError(t, err)
assert.False(t, localConfig.PromptsEnabled)
// Set `PromptsEnabled` to `true` using `argocd configure --prompts-enabled=true`
cmd := NewConfigureCommand(&argocdclient.ClientOptions{ConfigPath: testConfigFilePath})
cmd.SetArgs([]string{"--prompts-enabled=true"})
err = cmd.Execute()
require.NoError(t, err)
// Read the test config file
localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
require.NoError(t, err)
assert.True(t, localConfig.PromptsEnabled)
}
func TestNewConfigureCommand_PromptsEnabled_False(t *testing.T) {
// Write the test config file
err := os.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
require.NoError(t, err)
defer os.Remove(testConfigFilePath)
err = os.Chmod(testConfigFilePath, 0o600)
require.NoError(t, err, "Could not change the file permission to 0600 %v", err)
localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
require.NoError(t, err)
assert.False(t, localConfig.PromptsEnabled)
// Set `PromptsEnabled` to `false` using `argocd configure --prompts-enabled=false`
cmd := NewConfigureCommand(&argocdclient.ClientOptions{ConfigPath: testConfigFilePath})
cmd.SetArgs([]string{"--prompts-enabled=false"})
err = cmd.Execute()
require.NoError(t, err)
// Read the test config file
localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
require.NoError(t, err)
assert.False(t, localConfig.PromptsEnabled)
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/utils"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
gpgkeypkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/gpgkey"
appsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
@@ -168,21 +167,11 @@ func NewGPGDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
if len(args) != 1 {
errors.CheckError(fmt.Errorf("Missing KEYID argument"))
}
keyId := args[0]
conn, gpgIf := headless.NewClientOrDie(clientOpts, c).NewGPGKeyClientOrDie()
defer argoio.Close(conn)
promptUtil := utils.NewPrompt(clientOpts.PromptsEnabled)
canDelete := promptUtil.Confirm(fmt.Sprintf("Are you sure you want to remove '%s'? [y/n] ", keyId))
if canDelete {
_, err := gpgIf.Delete(ctx, &gpgkeypkg.GnuPGPublicKeyQuery{KeyID: keyId})
errors.CheckError(err)
fmt.Printf("Deleted key with key ID %s\n", keyId)
} else {
fmt.Printf("The command to delete key with key ID '%s' was cancelled.\n", keyId)
}
_, err := gpgIf.Delete(ctx, &gpgkeypkg.GnuPGPublicKeyQuery{KeyID: args[0]})
errors.CheckError(err)
fmt.Printf("Deleted key with key ID %s\n", args[0])
},
}
return command

View File

@@ -9,7 +9,6 @@ import (
"github.com/spf13/cobra"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/utils"
"github.com/argoproj/argo-cd/v2/common"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
repocredspkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/repocreds"
@@ -210,18 +209,10 @@ func NewRepoCredsRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
}
conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoCredsClientOrDie()
defer io.Close(conn)
promptUtil := utils.NewPrompt(clientOpts.PromptsEnabled)
for _, repoURL := range args {
canDelete := promptUtil.Confirm(fmt.Sprintf("Are you sure you want to remove '%s'? [y/n] ", repoURL))
if canDelete {
_, err := repoIf.DeleteRepositoryCredentials(ctx, &repocredspkg.RepoCredsDeleteRequest{Url: repoURL})
errors.CheckError(err)
fmt.Printf("Repository credentials for '%s' removed\n", repoURL)
} else {
fmt.Printf("The command to remove '%s' was cancelled.\n", repoURL)
}
_, err := repoIf.DeleteRepositoryCredentials(ctx, &repocredspkg.RepoCredsDeleteRequest{Url: repoURL})
errors.CheckError(err)
fmt.Printf("Repository credentials for '%s' removed\n", repoURL)
}
},
}

View File

@@ -60,7 +60,6 @@ func NewCommand() *cobra.Command {
command.AddCommand(initialize.InitCommand(NewCertCommand(&clientOpts)))
command.AddCommand(initialize.InitCommand(NewGPGCommand(&clientOpts)))
command.AddCommand(admin.NewAdminCommand(&clientOpts))
command.AddCommand(initialize.InitCommand(NewConfigureCommand(&clientOpts)))
defaultLocalConfigPath, err := localconfig.DefaultLocalConfigPath()
errors.CheckError(err)
@@ -71,7 +70,7 @@ func NewCommand() *cobra.Command {
command.PersistentFlags().StringVar(&clientOpts.CertFile, "server-crt", config.GetFlag("server-crt", ""), "Server certificate file")
command.PersistentFlags().StringVar(&clientOpts.ClientCertFile, "client-crt", config.GetFlag("client-crt", ""), "Client certificate file")
command.PersistentFlags().StringVar(&clientOpts.ClientCertKeyFile, "client-crt-key", config.GetFlag("client-crt-key", ""), "Client certificate key file")
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", env.StringFromEnv(common.EnvAuthToken, "")), fmt.Sprintf("Authentication token; set this or the %s environment variable", common.EnvAuthToken))
command.PersistentFlags().StringVar(&clientOpts.AuthToken, "auth-token", config.GetFlag("auth-token", ""), "Authentication token")
command.PersistentFlags().BoolVar(&clientOpts.GRPCWeb, "grpc-web", config.GetBoolFlag("grpc-web"), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2.")
command.PersistentFlags().StringVar(&clientOpts.GRPCWebRootPath, "grpc-web-root-path", config.GetFlag("grpc-web-root-path", ""), "Enables gRPC-web protocol. Useful if Argo CD server is behind proxy which does not support HTTP2. Set web root.")
command.PersistentFlags().StringVar(&cmdutil.LogFormat, "logformat", config.GetFlag("logformat", "text"), "Set the logging format. One of: text|json")
@@ -87,7 +86,6 @@ func NewCommand() *cobra.Command {
command.PersistentFlags().StringVar(&clientOpts.RedisHaProxyName, "redis-haproxy-name", env.StringFromEnv(common.EnvRedisHaProxyName, common.DefaultRedisHaProxyName), fmt.Sprintf("Name of the Redis HA Proxy; set this or the %s environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisHaProxyName))
command.PersistentFlags().StringVar(&clientOpts.RedisName, "redis-name", env.StringFromEnv(common.EnvRedisName, common.DefaultRedisName), fmt.Sprintf("Name of the Redis deployment; set this or the %s environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisName))
command.PersistentFlags().StringVar(&clientOpts.RepoServerName, "repo-server-name", env.StringFromEnv(common.EnvRepoServerName, common.DefaultRepoServerName), fmt.Sprintf("Name of the Argo CD Repo server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvRepoServerName))
command.PersistentFlags().BoolVar(&clientOpts.PromptsEnabled, "force-prompts-enabled", localconfig.GetPromptsEnabled(true), "Force optional interactive prompts to be enabled or disabled, overriding local configuration. If not specified, the local configuration value will be used, which is false by default.")
clientOpts.KubeOverrides = &clientcmd.ConfigOverrides{}
command.PersistentFlags().StringVar(&clientOpts.KubeOverrides.CurrentContext, "kube-context", "", "Directs the command to the given kube-context")

View File

@@ -1,23 +0,0 @@
package utils
import (
"github.com/argoproj/argo-cd/v2/util/cli"
)
type Prompt struct {
enabled bool
}
func NewPrompt(promptsEnabled bool) *Prompt {
return &Prompt{
enabled: promptsEnabled,
}
}
func (p *Prompt) Confirm(message string) bool {
if !p.enabled {
return true
}
return cli.AskToProceed(message)
}
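The utils.Prompt type shown in this file is a thin gate: when prompts are disabled it always answers yes, otherwise it defers to an interactive question, and the delete/remove commands earlier in this diff call Confirm before acting. A self-contained sketch of the same pattern; the deleteKey helper and the askUser stub stand in for the interactive cli.AskToProceed call:

package main

import "fmt"

// Prompt mirrors the wrapper in this file: a disabled prompt auto-confirms.
type Prompt struct {
	enabled bool
}

func NewPrompt(promptsEnabled bool) *Prompt {
	return &Prompt{enabled: promptsEnabled}
}

func (p *Prompt) Confirm(message string) bool {
	if !p.enabled {
		return true // non-interactive mode: behave as if the user said yes
	}
	fmt.Print(message)
	return askUser() // stand-in for cli.AskToProceed, which reads y/n from stdin
}

// askUser is a stub so the sketch runs without a terminal.
func askUser() bool { return false }

// deleteKey is a hypothetical destructive action guarded by the prompt.
func deleteKey(id string) { fmt.Printf("Deleted key with key ID %s\n", id) }

func main() {
	prompt := NewPrompt(true)
	if prompt.Confirm("Are you sure you want to remove 'ABCD1234'? [y/n] ") {
		deleteKey("ABCD1234")
	} else {
		fmt.Println("The command to delete key with key ID 'ABCD1234' was cancelled.")
	}
}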

View File

@@ -1,22 +0,0 @@
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewPrompt_PromptsEnabled_True(t *testing.T) {
prompt := NewPrompt(true)
assert.True(t, prompt.enabled)
}
func TestNewPrompt_PromptsEnabled_False(t *testing.T) {
prompt := NewPrompt(false)
assert.False(t, prompt.enabled)
}
func TestConfirm_PromptsEnabled_False(t *testing.T) {
prompt := NewPrompt(false)
assert.True(t, prompt.Confirm("Are you sure you want to run this command? (y/n) "))
}

View File

@@ -4,10 +4,10 @@ import (
"os"
"path/filepath"
"github.com/argoproj/argo-cd/v2/cmd/util"
"github.com/spf13/cobra"
_ "go.uber.org/automaxprocs"
appcontroller "github.com/argoproj/argo-cd/v2/cmd/argocd-application-controller/commands"
applicationset "github.com/argoproj/argo-cd/v2/cmd/argocd-applicationset-controller/commands"
cmpserver "github.com/argoproj/argo-cd/v2/cmd/argocd-cmp-server/commands"
@@ -31,12 +31,9 @@ func main() {
if val := os.Getenv(binaryNameEnv); val != "" {
binaryName = val
}
isCLI := false
switch binaryName {
case "argocd", "argocd-linux-amd64", "argocd-darwin-amd64", "argocd-windows-amd64.exe":
command = cli.NewCommand()
isCLI = true
case "argocd-server":
command = apiserver.NewCommand()
case "argocd-application-controller":
@@ -45,24 +42,19 @@ func main() {
command = reposerver.NewCommand()
case "argocd-cmp-server":
command = cmpserver.NewCommand()
isCLI = true
case "argocd-dex":
command = dex.NewCommand()
case "argocd-notifications":
command = notification.NewCommand()
case "argocd-git-ask-pass":
command = gitaskpass.NewCommand()
isCLI = true
case "argocd-applicationset-controller":
command = applicationset.NewCommand()
case "argocd-k8s-auth":
command = k8sauth.NewCommand()
isCLI = true
default:
command = cli.NewCommand()
isCLI = true
}
util.SetAutoMaxProcs(isCLI)
if err := command.Execute(); err != nil {
os.Exit(1)
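The main.go hunks above add or remove an isCLI flag that is passed to util.SetAutoMaxProcs (its body appears in the next file's hunk): CLI binaries call maxprocs.Set and discard any error, while long-running services pass a logger. A minimal sketch of the underlying go.uber.org/automaxprocs call; the log wording is arbitrary:

package main

import (
	"log"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Service-style setup: align GOMAXPROCS with the container CPU quota and log the result.
	undo, err := maxprocs.Set(maxprocs.Logger(log.Printf))
	defer undo()
	if err != nil {
		log.Printf("error setting GOMAXPROCS: %v", err)
	}

	// CLI-style setup would instead be: _, _ = maxprocs.Set()
	// i.e. silently ignore errors so the tool produces no extra output.
}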

View File

@@ -9,8 +9,6 @@ import (
"strings"
"time"
"go.uber.org/automaxprocs/maxprocs"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
@@ -50,7 +48,6 @@ type AppOptions struct {
helmVersion string
helmPassCredentials bool
helmSkipCrds bool
helmSkipTests bool
helmNamespace string
helmKubeVersion string
helmApiVersions []string
@@ -91,19 +88,6 @@ type AppOptions struct {
ref string
}
// SetAutoMaxProcs sets the GOMAXPROCS value based on the binary name.
// It suppresses logs for CLI binaries and logs the setting for services.
func SetAutoMaxProcs(isCLI bool) {
if isCLI {
_, _ = maxprocs.Set() // Intentionally ignore errors for CLI binaries
} else {
_, err := maxprocs.Set(maxprocs.Logger(log.Infof))
if err != nil {
log.Errorf("Error setting GOMAXPROCS: %v", err)
}
}
}
func AddAppFlags(command *cobra.Command, opts *AppOptions) {
command.Flags().StringVar(&opts.repoURL, "repo", "", "Repository URL, ignored if a file is set")
command.Flags().StringVar(&opts.appPath, "path", "", "Path in repository to the app directory, ignored if a file is set")
@@ -125,7 +109,6 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
command.Flags().StringArrayVar(&opts.helmSetStrings, "helm-set-string", []string{}, "Helm set STRING values on the command line (can be repeated to set several values: --helm-set-string key1=val1 --helm-set-string key2=val2)")
command.Flags().StringArrayVar(&opts.helmSetFiles, "helm-set-file", []string{}, "Helm set values from respective files specified via the command line (can be repeated to set several values: --helm-set-file key1=path1 --helm-set-file key2=path2)")
command.Flags().BoolVar(&opts.helmSkipCrds, "helm-skip-crds", false, "Skip helm crd installation step")
command.Flags().BoolVar(&opts.helmSkipTests, "helm-skip-tests", false, "Skip helm test manifests installation step")
command.Flags().StringVar(&opts.helmNamespace, "helm-namespace", "", "Helm namespace to use when running helm template. If not set, use app.spec.destination.namespace")
command.Flags().StringVar(&opts.helmKubeVersion, "helm-kube-version", "", "Helm kube-version to use when running helm template. If not set, use the kube version from the destination cluster")
command.Flags().StringArrayVar(&opts.helmApiVersions, "helm-api-versions", []string{}, "Helm api-versions (in format [group/]version/kind) to use when running helm template (Can be repeated to set several values: --helm-api-versions traefik.io/v1alpha1/TLSOption --helm-api-versions v1/Service). If not set, use the api-versions from the destination cluster")
@@ -375,7 +358,6 @@ type helmOpts struct {
helmSetFiles []string
passCredentials bool
skipCrds bool
skipTests bool
namespace string
kubeVersion string
apiVersions []string
@@ -409,9 +391,6 @@ func setHelmOpt(src *argoappv1.ApplicationSource, opts helmOpts) {
if opts.skipCrds {
src.Helm.SkipCrds = opts.skipCrds
}
if opts.skipTests {
src.Helm.SkipTests = opts.skipTests
}
if opts.namespace != "" {
src.Helm.Namespace = opts.namespace
}
@@ -679,8 +658,6 @@ func ConstructSource(source *argoappv1.ApplicationSource, appOpts AppOptions, fl
setHelmOpt(source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
case "helm-skip-crds":
setHelmOpt(source, helmOpts{skipCrds: appOpts.helmSkipCrds})
case "helm-skip-tests":
setHelmOpt(source, helmOpts{skipTests: appOpts.helmSkipTests})
case "helm-namespace":
setHelmOpt(source, helmOpts{namespace: appOpts.helmNamespace})
case "helm-kube-version":

View File

@@ -1,11 +1,10 @@
package util
import (
"bytes"
"log"
"os"
"testing"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -66,11 +65,6 @@ func Test_setHelmOpt(t *testing.T) {
setHelmOpt(&src, helmOpts{skipCrds: true})
assert.True(t, src.Helm.SkipCrds)
})
t.Run("HelmSkipTests", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{skipTests: true})
assert.True(t, src.Helm.SkipTests)
})
t.Run("HelmNamespace", func(t *testing.T) {
src := v1alpha1.ApplicationSource{}
setHelmOpt(&src, helmOpts{namespace: "custom-namespace"})
@@ -535,27 +529,3 @@ func TestFilterResources(t *testing.T) {
assert.Nil(t, filteredResources)
})
}
func TestSetAutoMaxProcs(t *testing.T) {
t.Run("CLI mode ignores errors", func(t *testing.T) {
logBuffer := &bytes.Buffer{}
oldLogger := log.Default()
log.SetOutput(logBuffer)
defer log.SetOutput(oldLogger.Writer())
SetAutoMaxProcs(true)
assert.Empty(t, logBuffer.String(), "Expected no log output when isCLI is true")
})
t.Run("Non-CLI mode logs error on failure", func(t *testing.T) {
logBuffer := &bytes.Buffer{}
oldLogger := log.Default()
log.SetOutput(logBuffer)
defer log.SetOutput(oldLogger.Writer())
SetAutoMaxProcs(false)
assert.NotContains(t, logBuffer.String(), "Error setting GOMAXPROCS", "Unexpected log output detected")
})
}

View File

@@ -100,18 +100,11 @@ func NewCluster(name string, namespaces []string, clusterResources bool, conf *r
TLSClientConfig: tlsClientConfig,
AWSAuthConfig: awsAuthConf,
ExecProviderConfig: execProviderConf,
DisableCompression: conf.DisableCompression,
},
Labels: labels,
Annotations: annotations,
}
// it's a tradeoff to get proxy url from rest config
// more detail: https://github.com/kubernetes/kubernetes/pull/81443
if conf.Proxy != nil {
if url, err := conf.Proxy(nil); err == nil {
clst.Config.ProxyUrl = url.String()
}
}
// Bearer token will preferentially be used for auth if present,
// Even in presence of key/cert credentials
// So set bearer token only if the key/cert data is absent
@@ -165,8 +158,6 @@ type ClusterOptions struct {
ExecProviderAPIVersion string
ExecProviderInstallHint string
ClusterEndpoint string
DisableCompression bool
ProxyUrl string
}
// InClusterEndpoint returns true if ArgoCD should reference the in-cluster
@@ -191,5 +182,4 @@ func AddClusterFlags(command *cobra.Command, opts *ClusterOptions) {
command.Flags().StringVar(&opts.ExecProviderAPIVersion, "exec-command-api-version", "", "Preferred input version of the ExecInfo for the --exec-command executable")
command.Flags().StringVar(&opts.ExecProviderInstallHint, "exec-command-install-hint", "", "Text shown to the user when the --exec-command executable doesn't seem to be present")
command.Flags().StringVar(&opts.ClusterEndpoint, "cluster-endpoint", "", "Cluster endpoint to use. Can be one of the following: 'kubeconfig', 'kube-public', or 'internal'.")
command.Flags().BoolVar(&opts.DisableCompression, "disable-compression", false, "Bypasses automatic GZip compression requests to the server")
}

View File

@@ -37,7 +37,6 @@ func Test_newCluster(t *testing.T) {
assert.Equal(t, "", clusterWithData.Config.BearerToken)
assert.Equal(t, labels, clusterWithData.Labels)
assert.Equal(t, annotations, clusterWithData.Annotations)
assert.False(t, clusterWithData.Config.DisableCompression)
clusterWithFiles := NewCluster("test-cluster", []string{"test-namespace"}, false, &rest.Config{
TLSClientConfig: rest.TLSClientConfig{
@@ -74,20 +73,6 @@ func Test_newCluster(t *testing.T) {
assert.Equal(t, "test-bearer-token", clusterWithBearerToken.Config.BearerToken)
assert.Nil(t, clusterWithBearerToken.Labels)
assert.Nil(t, clusterWithBearerToken.Annotations)
clusterWithDisableCompression := NewCluster("test-cluster", []string{"test-namespace"}, false, &rest.Config{
TLSClientConfig: rest.TLSClientConfig{
Insecure: false,
ServerName: "test-endpoint.example.com",
CAData: []byte("test-ca-data"),
},
DisableCompression: true,
Host: "test-endpoint.example.com",
}, "test-bearer-token",
&v1alpha1.AWSAuthConfig{},
&v1alpha1.ExecProviderConfig{}, labels, annotations)
assert.True(t, clusterWithDisableCompression.Config.DisableCompression)
}
func TestGetKubePublicEndpoint(t *testing.T) {

View File

@@ -451,7 +451,8 @@ func Test_getParametersAnnouncement_invalid_json(t *testing.T) {
Args: []string{`[`},
}
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command, []*apiclient.EnvEntry{})
assert.ErrorContains(t, err, "unexpected end of JSON input")
require.Error(t, err)
assert.Contains(t, err.Error(), "unexpected end of JSON input")
}
func Test_getParametersAnnouncement_bad_command(t *testing.T) {
@@ -460,7 +461,8 @@ func Test_getParametersAnnouncement_bad_command(t *testing.T) {
Args: []string{"1"},
}
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command, []*apiclient.EnvEntry{})
assert.ErrorContains(t, err, "error executing dynamic parameter output command")
require.Error(t, err)
assert.Contains(t, err.Error(), "error executing dynamic parameter output command")
}
func Test_getTempDirMustCleanup(t *testing.T) {

View File

@@ -175,8 +175,6 @@ const (
LabelValueSecretTypeRepository = "repository"
// LabelValueSecretTypeRepoCreds indicates a secret type of repository credentials
LabelValueSecretTypeRepoCreds = "repo-creds"
// LabelValueSecretTypeSCMCreds indicates a secret type of SCM credentials
LabelValueSecretTypeSCMCreds = "scm-creds"
// AnnotationKeyAppInstance is the Argo CD application name is used as the instance name
AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id"
@@ -257,8 +255,6 @@ const (
EnvHelmIndexCacheDuration = "ARGOCD_HELM_INDEX_CACHE_DURATION"
// EnvAppConfigPath allows to override the configuration path for repo server
EnvAppConfigPath = "ARGOCD_APP_CONF_PATH"
// EnvAuthToken is the environment variable name for the auth token used by the CLI
EnvAuthToken = "ARGOCD_AUTH_TOKEN"
// EnvLogFormat log format that is defined by `--logformat` option
EnvLogFormat = "ARGOCD_LOG_FORMAT"
// EnvLogLevel log level that is defined by `--loglevel` option
@@ -318,10 +314,7 @@ const (
// Constants used by util/clusterauth package
const (
ClusterAuthRequestTimeout = 10 * time.Second
)
const (
BearerTokenTimeout = 30 * time.Second
BearerTokenTimeout = 30 * time.Second
)
const (
@@ -431,10 +424,8 @@ var PermissionDeniedAPIError = status.Error(codes.PermissionDenied, "permission
// Redis password consts
const (
// RedisInitialCredentials is the name for the argocd kubernetes secret which will have the redis password
RedisInitialCredentials = "argocd-redis"
// RedisInitialCredentialsKey is the key for the argocd kubernetes secret that maps to the redis password
RedisInitialCredentialsKey = "auth"
DefaultRedisInitialPasswordSecretName = "argocd-redis"
DefaultRedisInitialPasswordKey = "auth"
)
/*
@@ -443,17 +434,17 @@ SetOptionalRedisPasswordFromKubeConfig sets the optional Redis password if it ex
We specify kubeClient as kubernetes.Interface to allow for mocking in tests, but this should be treated as a kubernetes.Clientset param.
*/
func SetOptionalRedisPasswordFromKubeConfig(ctx context.Context, kubeClient kubernetes.Interface, namespace string, redisOptions *redis.Options) error {
secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, RedisInitialCredentials, v1.GetOptions{})
secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, DefaultRedisInitialPasswordSecretName, v1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get secret %s/%s: %w", namespace, RedisInitialCredentials, err)
return fmt.Errorf("failed to get secret %s/%s: %w", namespace, DefaultRedisInitialPasswordSecretName, err)
}
if secret == nil {
return fmt.Errorf("failed to get secret %s/%s: secret is nil", namespace, RedisInitialCredentials)
return fmt.Errorf("failed to get secret %s/%s: secret is nil", namespace, DefaultRedisInitialPasswordSecretName)
}
_, ok := secret.Data[RedisInitialCredentialsKey]
_, ok := secret.Data[DefaultRedisInitialPasswordKey]
if !ok {
return fmt.Errorf("secret %s/%s does not contain key %s", namespace, RedisInitialCredentials, RedisInitialCredentialsKey)
return fmt.Errorf("secret %s/%s does not contain key %s", namespace, DefaultRedisInitialPasswordSecretName, DefaultRedisInitialPasswordKey)
}
redisOptions.Password = string(secret.Data[RedisInitialCredentialsKey])
redisOptions.Password = string(secret.Data[DefaultRedisInitialPasswordKey])
return nil
}

View File

@@ -63,24 +63,24 @@ func TestSetOptionalRedisPasswordFromKubeConfig(t *testing.T) {
expectedPassword: "password123",
expectedErr: "",
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: RedisInitialCredentials},
Data: map[string][]byte{RedisInitialCredentialsKey: []byte("password123")},
ObjectMeta: metav1.ObjectMeta{Name: DefaultRedisInitialPasswordSecretName},
Data: map[string][]byte{DefaultRedisInitialPasswordKey: []byte("password123")},
},
},
{
name: "Secret does not exist",
namespace: "default",
expectedPassword: "",
expectedErr: fmt.Sprintf("failed to get secret default/%s", RedisInitialCredentials),
expectedErr: fmt.Sprintf("failed to get secret default/%s", DefaultRedisInitialPasswordSecretName),
secret: nil,
},
{
name: "Secret exists without correct key",
namespace: "default",
expectedPassword: "",
expectedErr: fmt.Sprintf("secret default/%s does not contain key %s", RedisInitialCredentials, RedisInitialCredentialsKey),
expectedErr: fmt.Sprintf("secret default/%s does not contain key %s", DefaultRedisInitialPasswordSecretName, DefaultRedisInitialPasswordKey),
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: RedisInitialCredentials},
ObjectMeta: metav1.ObjectMeta{Name: DefaultRedisInitialPasswordSecretName},
Data: map[string][]byte{},
},
},
@@ -101,7 +101,8 @@ func TestSetOptionalRedisPasswordFromKubeConfig(t *testing.T) {
}
err := SetOptionalRedisPasswordFromKubeConfig(ctx, kubeClient, tc.namespace, redisOptions)
if tc.expectedErr != "" {
require.ErrorContains(t, err, tc.expectedErr)
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedErr)
} else {
require.NoError(t, err)
}

View File

@@ -131,6 +131,7 @@ type ApplicationController struct {
statusRefreshJitter time.Duration
selfHealTimeout time.Duration
selfHealBackOff *wait.Backoff
repoClientset apiclient.Clientset
db db.ArgoDB
settingsMgr *settings_util.SettingsManager
refreshRequestedApps map[string]CompareWith
@@ -174,7 +175,6 @@ func NewApplicationController(
serverSideDiff bool,
dynamicClusterDistributionEnabled bool,
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
enableK8sEvent []string,
) (*ApplicationController, error) {
log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
db := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -188,6 +188,7 @@ func NewApplicationController(
kubeClientset: kubeClientset,
kubectl: kubectl,
applicationClientset: applicationClientset,
repoClientset: repoClientset,
appRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_reconciliation_queue"}),
appOperationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_operation_processing_queue"}),
projectRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "project_reconciliation_queue"}),
@@ -198,7 +199,7 @@ func NewApplicationController(
statusRefreshJitter: appResyncJitter,
refreshRequestedApps: make(map[string]CompareWith),
refreshRequestedAppsMutex: &sync.Mutex{},
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController, enableK8sEvent),
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
selfHealBackOff: selfHealBackoff,
@@ -1164,15 +1165,9 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
logCtx.Infof("Resource entries removed from undefined cluster")
return nil
}
clusterRESTConfig, err := cluster.RESTConfig()
if err != nil {
return err
}
config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, clusterRESTConfig)
config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig())
if app.CascadedDeletion() {
deletionApproved := app.IsDeletionConfirmed(app.DeletionTimestamp.Time)
logCtx.Infof("Deleting resources")
// ApplicationDestination points to a valid cluster, so we may clean up the live objects
objs := make([]*unstructured.Unstructured, 0)
@@ -1190,10 +1185,6 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
if ctrl.shouldBeDeleted(app, objsMap[k]) {
objs = append(objs, objsMap[k])
if res, ok := app.Status.FindResource(k); ok && res.RequiresDeletionConfirmation && !deletionApproved {
logCtx.Infof("Resource %v requires manual confirmation to delete", k)
return nil
}
}
}

View File

@@ -45,15 +45,12 @@ import (
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
"github.com/argoproj/argo-cd/v2/test"
"github.com/argoproj/argo-cd/v2/util/argo"
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/settings"
)
var testEnableEventList []string = argo.DefaultEnableEventList()
type namespacedResource struct {
v1alpha1.ResourceNode
AppName string
@@ -173,7 +170,6 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
false,
false,
normalizers.IgnoreNormalizerOpts{},
testEnableEventList,
)
db := &dbmocks.ArgoDB{}
db.On("GetApplicationControllerReplicas").Return(1)
@@ -823,7 +819,6 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
// TestFinalizeAppDeletion verifies application deletion
func TestFinalizeAppDeletion(t *testing.T) {
now := metav1.Now()
defaultProj := v1alpha1.AppProject{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
@@ -844,9 +839,11 @@ func TestFinalizeAppDeletion(t *testing.T) {
t.Run("CascadingDelete", func(t *testing.T) {
app := newFakeApp()
app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName)
app.DeletionTimestamp = &now
app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{}}, nil)
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
}}, nil)
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]
@@ -885,7 +882,6 @@ func TestFinalizeAppDeletion(t *testing.T) {
}
app := newFakeApp()
app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName)
app.DeletionTimestamp = &now
app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
app.Spec.Project = "restricted"
appObj := kube.MustToUnstructured(&app)
@@ -931,8 +927,10 @@ func TestFinalizeAppDeletion(t *testing.T) {
t.Run("DeleteWithDestinationClusterName", func(t *testing.T) {
app := newFakeAppWithDestName()
app.SetCascadedDeletion(v1alpha1.ResourcesFinalizerName)
app.DeletionTimestamp = &now
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{}}, nil)
appObj := kube.MustToUnstructured(&app)
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app, &defaultProj}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
kube.GetResourceKey(appObj): appObj,
}}, nil)
patched := false
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
defaultReactor := fakeAppCs.ReactionChain[0]
@@ -2194,7 +2192,6 @@ func TestAlreadyAttemptSync(t *testing.T) {
}
func assertDurationAround(t *testing.T, expected time.Duration, actual time.Duration) {
t.Helper()
delta := time.Second / 2
assert.GreaterOrEqual(t, expected, actual-delta)
assert.LessOrEqual(t, expected, actual+delta)

View File

@@ -495,10 +495,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
return nil, fmt.Errorf("error getting value for %v: %w", settings.RespectRBAC, err)
}
clusterCacheConfig, err := cluster.RESTConfig()
if err != nil {
return nil, fmt.Errorf("error getting cluster RESTConfig: %w", err)
}
clusterCacheConfig := cluster.RESTConfig()
// Controller dynamically fetches all resource types available on the cluster
// using a discovery API that may contain deprecated APIs.
// This causes log flooding when managing a large number of clusters.
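For context on the comment above: client-go exposes cluster resource discovery through a discovery client, and enumerating every advertised group/version also surfaces deprecated APIs, which is where the noise comes from when many clusters are managed. A minimal sketch of that kind of discovery call (illustrative only, not the controller's actual code path):

```go
package example

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
)

// listServerResources enumerates every API resource the cluster advertises,
// including deprecated group/versions.
func listServerResources(cfg *rest.Config) error {
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return fmt.Errorf("building discovery client: %w", err)
	}
	_, resourceLists, err := dc.ServerGroupsAndResources()
	if err != nil {
		// Partial discovery failures are common; callers often log and continue.
		return fmt.Errorf("discovering resources: %w", err)
	}
	for _, rl := range resourceLists {
		fmt.Printf("%s: %d resources\n", rl.GroupVersion, len(rl.APIResources))
	}
	return nil
}
```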
@@ -829,12 +826,7 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a
var updateSettings []clustercache.UpdateSettingsFunc
if !reflect.DeepEqual(oldCluster.Config, newCluster.Config) {
newClusterRESTConfig, err := newCluster.RESTConfig()
if err == nil {
updateSettings = append(updateSettings, clustercache.SetConfig(newClusterRESTConfig))
} else {
log.Errorf("error getting cluster REST config: %v", err)
}
updateSettings = append(updateSettings, clustercache.SetConfig(newCluster.RESTConfig()))
}
if !reflect.DeepEqual(oldCluster.Namespaces, newCluster.Namespaces) {
updateSettings = append(updateSettings, clustercache.SetNamespaces(newCluster.Namespaces))

View File

@@ -66,8 +66,6 @@ func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLa
switch gvk.Kind {
case "VirtualService":
populateIstioVirtualServiceInfo(un, res)
case "ServiceEntry":
populateIstioServiceEntryInfo(un, res)
}
}
}
@@ -280,22 +278,6 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
}
func populateIstioServiceEntryInfo(un *unstructured.Unstructured, res *ResourceInfo) {
targetLabels, ok, err := unstructured.NestedStringMap(un.Object, "spec", "workloadSelector", "labels")
if err != nil {
return
}
if !ok {
return
}
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{
TargetLabels: targetLabels,
TargetRefs: []v1alpha1.ResourceRef{{
Kind: kube.PodKind,
}},
}
}
func isPodInitializedConditionTrue(status *v1.PodStatus) bool {
for _, condition := range status.Conditions {
if condition.Type != v1.PodInitialized {

View File

@@ -246,40 +246,10 @@ spec:
- destination:
host: service
`)
testIstioServiceEntry = strToUnstructured(`
apiVersion: networking.istio.io/v1beta1
kind: ServiceEntry
metadata:
name: echo
spec:
exportTo:
- '*'
hosts:
- echo.internal
location: MESH_INTERNAL
ports:
- name: http
number: 80
protocol: HTTP
targetPort: 5678
resolution: DNS
workloadSelector:
labels:
app.kubernetes.io/name: echo-2
`)
)
// These tests are equivalent to tests in ui/src/app/applications/components/utils.test.tsx. If you update tests here,
// please make sure to update the equivalent tests in the UI.
func TestGetPodInfo(t *testing.T) {
t.Parallel()
t.Run("TestGetPodInfo", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -301,22 +271,22 @@ func TestGetPodInfo(t *testing.T) {
memory: 128Mi
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
assert.Equal(t, []string{"bar"}, info.Images)
assert.Equal(t, &PodInfo{
NodeName: "minikube",
ResourceRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("128Mi")},
}, info.PodInfo)
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, info.NetworkingInfo)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
assert.Equal(t, []string{"bar"}, info.Images)
assert.Equal(t, &PodInfo{
NodeName: "minikube",
ResourceRequests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("128Mi")},
}, info.PodInfo)
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, info.NetworkingInfo)
}
t.Run("TestGetPodWithInitialContainerInfo", func(t *testing.T) {
pod := strToUnstructured(`
func TestGetPodWithInitialContainerInfo(t *testing.T) {
pod := strToUnstructured(`
apiVersion: "v1"
kind: "Pod"
metadata:
@@ -361,19 +331,17 @@ func TestGetPodInfo(t *testing.T) {
phase: "Running"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/1"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/1"},
}, info.Info)
}
t.Run("TestGetPodInfoWithSidecar", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
func TestGetPodInfoWithSidecar(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -418,19 +386,17 @@ func TestGetPodInfo(t *testing.T) {
phase: Running
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "2/2"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "2/2"},
}, info.Info)
}
t.Run("TestGetPodInfoWithInitialContainer", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
func TestGetPodInfoWithInitialContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -476,20 +442,18 @@ func TestGetPodInfo(t *testing.T) {
startTime: '2024-10-09T08:02:39Z'
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/1"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/1"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod has 2 restartable init containers, the first one running but not started.
t.Run("TestGetPodInfoWithRestartableInitContainer", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod has 2 restartable init containers, the first one running but not started.
func TestGetPodInfoWithRestartableInitContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -532,21 +496,19 @@ func TestGetPodInfo(t *testing.T) {
status: "False"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/2"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/3"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/2"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/3"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod has 2 restartable init containers, the first one started and the second one running but not started.
t.Run("TestGetPodInfoWithPartiallyStartedInitContainers", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod has 2 restartable init containers, the first one started and the second one running but not started.
func TestGetPodInfoWithPartiallyStartedInitContainers(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -589,21 +551,19 @@ func TestGetPodInfo(t *testing.T) {
status: "False"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:1/2"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/3"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:1/2"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/3"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod has 2 restartable init containers started and 1 container running
t.Run("TestGetPodInfoWithStartedInitContainers", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod has 2 restartable init containers started and 1 container running
func TestGetPodInfoWithStartedInitContainers(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -649,21 +609,19 @@ func TestGetPodInfo(t *testing.T) {
status: "True"
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/3"},
{Name: "Restart Count", Value: "7"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/3"},
{Name: "Restart Count", Value: "7"},
}, info.Info)
}
// Test pod has 1 init container restarting and 1 container not running
t.Run("TestGetPodInfoWithNormalInitContainer", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod has 1 init container restarting and 1 container not running
func TestGetPodInfoWithNormalInitContainer(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -691,21 +649,19 @@ func TestGetPodInfo(t *testing.T) {
waiting: {}
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/1"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Init:0/1"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
{Name: "Restart Count", Value: "3"},
}, info.Info)
}
// Test pod condition succeed
t.Run("TestPodConditionSucceeded", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod condition succeed
func TestPodConditionSucceeded(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -724,20 +680,18 @@ func TestGetPodInfo(t *testing.T) {
reason: Completed
exitCode: 0
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Completed"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Completed"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition failed
t.Run("TestPodConditionFailed", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod condition failed
func TestPodConditionFailed(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -756,20 +710,18 @@ func TestGetPodInfo(t *testing.T) {
reason: Error
exitCode: 1
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Error"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Error"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition succeed with deletion
t.Run("TestPodConditionSucceededWithDeletion", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod condition succeed with deletion
func TestPodConditionSucceededWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -789,20 +741,18 @@ func TestGetPodInfo(t *testing.T) {
reason: Completed
exitCode: 0
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Completed"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Completed"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition running with deletion
t.Run("TestPodConditionRunningWithDeletion", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod condition running with deletion
func TestPodConditionRunningWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -820,20 +770,18 @@ func TestGetPodInfo(t *testing.T) {
state:
running: {}
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Terminating"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Terminating"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test pod condition pending with deletion
t.Run("TestPodConditionPendingWithDeletion", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test pod condition pending with deletion
func TestPodConditionPendingWithDeletion(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -846,20 +794,18 @@ func TestGetPodInfo(t *testing.T) {
status:
phase: Pending
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Terminating"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Terminating"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/1"},
}, info.Info)
}
// Test PodScheduled condition with reason SchedulingGated
t.Run("TestPodScheduledWithSchedulingGated", func(t *testing.T) {
t.Parallel()
pod := strToUnstructured(`
// Test PodScheduled condition with reason SchedulingGated
func TestPodScheduledWithSchedulingGated(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
@@ -876,14 +822,13 @@ func TestGetPodInfo(t *testing.T) {
status: "False"
reason: SchedulingGated
`)
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "SchedulingGated"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/2"},
}, info.Info)
})
info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "SchedulingGated"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "0/2"},
}, info.Info)
}
func TestGetNodeInfo(t *testing.T) {
@@ -956,21 +901,6 @@ func TestGetIstioVirtualServiceInfo(t *testing.T) {
})
}
func TestGetIstioServiceEntryInfo(t *testing.T) {
info := &ResourceInfo{}
populateNodeInfo(testIstioServiceEntry, info, []string{})
assert.Empty(t, info.Info)
require.NotNil(t, info.NetworkingInfo)
require.NotNil(t, info.NetworkingInfo.TargetRefs)
assert.Contains(t, info.NetworkingInfo.TargetRefs, v1alpha1.ResourceRef{
Kind: kube.PodKind,
})
assert.Equal(t, map[string]string{
"app.kubernetes.io/name": "echo-2",
}, info.NetworkingInfo.TargetLabels)
}
func TestGetIngressInfo(t *testing.T) {
tests := []struct {
Ingress *unstructured.Unstructured

View File

@@ -467,7 +467,6 @@ func assertMetricsPrinted(t *testing.T, expectedLines, body string) {
// assertMetricsNotPrinted
func assertMetricsNotPrinted(t *testing.T, expectedLines, body string) {
t.Helper()
for _, line := range strings.Split(expectedLines, "\n") {
if line == "" {
continue

View File

@@ -836,7 +836,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Default sharding with statefulset",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvControllerReplicas, "1")
},
cleanup: func() {},
@@ -848,7 +847,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Default sharding with deployment",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
},
cleanup: func() {},
@@ -860,7 +858,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Default sharding with deployment and multiple replicas",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvAppControllerName, "argocd-application-controller-multi-replicas")
},
cleanup: func() {},
@@ -872,7 +869,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Statefulset multiple replicas",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvControllerReplicas, "3")
osHostnameFunction = func() (string, error) { return "example-shard-3", nil }
},
@@ -887,7 +883,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Explicit shard with statefulset and 1 replica",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvControllerReplicas, "1")
t.Setenv(common.EnvControllerShard, "3")
},
@@ -900,7 +895,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Explicit shard with statefulset and 2 replica - and to high shard",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvControllerReplicas, "2")
t.Setenv(common.EnvControllerShard, "3")
},
@@ -913,7 +907,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Explicit shard with statefulset and 2 replica",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvControllerReplicas, "2")
t.Setenv(common.EnvControllerShard, "1")
},
@@ -926,7 +919,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Explicit shard with deployment",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvControllerShard, "3")
},
cleanup: func() {},
@@ -938,7 +930,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Explicit shard with deployment and multiple replicas will read from configmap",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvAppControllerName, "argocd-application-controller-multi-replicas")
t.Setenv(common.EnvControllerShard, "3")
},
@@ -951,7 +942,6 @@ func TestGetClusterSharding(t *testing.T) {
{
name: "Dynamic sharding but missing deployment",
envsSetter: func(t *testing.T) {
t.Helper()
t.Setenv(common.EnvAppControllerName, "missing-deployment")
},
cleanup: func() {},
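The sharding test cases above follow a table-driven pattern where each case sets environment variables through `t.Setenv`, which Go restores automatically when the (sub)test ends. A small generic sketch of that pattern (the variable names here are hypothetical, not the real Argo CD env vars):

```go
package example

import (
	"os"
	"testing"
)

func TestShardingFromEnv(t *testing.T) {
	cases := []struct {
		name     string
		env      map[string]string
		expected string
	}{
		{name: "default", env: map[string]string{"CONTROLLER_REPLICAS": "1"}, expected: "1"},
		{name: "multi", env: map[string]string{"CONTROLLER_REPLICAS": "3"}, expected: "3"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			for k, v := range tc.env {
				t.Setenv(k, v) // restored automatically when the subtest ends
			}
			if got := os.Getenv("CONTROLLER_REPLICAS"); got != tc.expected {
				t.Fatalf("expected %s, got %s", tc.expected, got)
			}
		})
	}
}
```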

View File

@@ -10,7 +10,6 @@ import (
goSync "sync"
"time"
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
v1 "k8s.io/api/core/v1"
"github.com/argoproj/gitops-engine/pkg/diff"
@@ -256,29 +255,28 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
log.Debugf("Generating Manifest for source %s revision %s", source, revision)
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
Repo: repo,
Repos: permittedHelmRepos,
Revision: revision,
NoCache: noCache,
NoRevisionCache: noRevisionCache,
AppLabelKey: appLabelKey,
AppName: app.InstanceName(m.namespace),
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &source,
KustomizeOptions: kustomizeOptions,
KubeVersion: serverVersion,
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
VerifySignature: verifySignature,
HelmRepoCreds: permittedHelmCredentials,
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
EnabledSourceTypes: enabledSourceTypes,
HelmOptions: helmOptions,
HasMultipleSources: app.Spec.HasMultipleSources(),
RefSources: refSources,
ProjectName: proj.Name,
ProjectSourceRepos: proj.Spec.SourceRepos,
AnnotationManifestGeneratePaths: app.GetAnnotation(v1alpha1.AnnotationKeyManifestGeneratePaths),
InstallationID: installationID,
Repo: repo,
Repos: permittedHelmRepos,
Revision: revision,
NoCache: noCache,
NoRevisionCache: noRevisionCache,
AppLabelKey: appLabelKey,
AppName: app.InstanceName(m.namespace),
Namespace: app.Spec.Destination.Namespace,
ApplicationSource: &source,
KustomizeOptions: kustomizeOptions,
KubeVersion: serverVersion,
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
VerifySignature: verifySignature,
HelmRepoCreds: permittedHelmCredentials,
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
EnabledSourceTypes: enabledSourceTypes,
HelmOptions: helmOptions,
HasMultipleSources: app.Spec.HasMultipleSources(),
RefSources: refSources,
ProjectName: proj.Name,
ProjectSourceRepos: proj.Spec.SourceRepos,
InstallationID: installationID,
})
if err != nil {
return nil, nil, false, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
@@ -300,8 +298,7 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
logCtx.Info("GetRepoObjs stats")
// If a revision in any of the sources cannot be updated,
// we should trigger self-healing whenever there are changes to the manifests.
// in case if annotation not exists, we should always execute selfheal if manifests changed
if atLeastOneRevisionIsNotPossibleToBeUpdated {
revisionUpdated = true
}
@@ -748,8 +745,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
Group: gvk.Group,
Hook: isHook(obj),
RequiresPruning: targetObj == nil && liveObj != nil && isSelfReferencedObj,
RequiresDeletionConfirmation: targetObj != nil && resourceutil.HasAnnotationOption(targetObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDeleteRequireConfirm) ||
liveObj != nil && resourceutil.HasAnnotationOption(liveObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDeleteRequireConfirm),
}
if targetObj != nil {
resState.SyncWave = int64(syncwaves.Wave(targetObj))
@@ -965,6 +960,10 @@ func specEqualsCompareTo(spec v1alpha1.ApplicationSpec, comparedTo v1alpha1.Comp
currentSpec.Destination.Name = ""
}
// Set IsServerInferred to false on both, because that field is not important for comparison.
comparedTo.Destination.SetIsServerInferred(false)
currentSpec.Destination.SetIsServerInferred(false)
return reflect.DeepEqual(comparedTo, currentSpec)
}
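The comment above describes a common comparison trick: clear fields that carry derived or inferred state before a deep-equality check so they cannot cause spurious diffs. A generic sketch of the idea with hypothetical types (field and type names are illustrative, not Argo CD's actual API):

```go
package example

import "reflect"

// Hypothetical types for illustration only.
type Destination struct {
	Server         string
	Namespace      string
	serverInferred bool // derived at runtime; must not affect equality
}

type Spec struct {
	Destination Destination
	Project     string
}

// specsEquivalent compares two specs while ignoring derived-only state.
func specsEquivalent(a, b Spec) bool {
	a.Destination.serverInferred = false
	b.Destination.serverInferred = false
	return reflect.DeepEqual(a, b)
}
```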

View File

@@ -28,6 +28,7 @@ import (
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
"github.com/argoproj/argo-cd/v2/test"
"github.com/argoproj/argo-cd/v2/util/argo"
)
@@ -547,7 +548,6 @@ func TestAppRevisionsMultiSource(t *testing.T) {
}
func toJSON(t *testing.T, obj *unstructured.Unstructured) string {
t.Helper()
data, err := json.Marshal(obj)
require.NoError(t, err)
return string(data)
@@ -680,6 +680,7 @@ func TestCompareAppStateWithManifestGeneratePath(t *testing.T) {
assert.NotNil(t, compRes)
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
assert.Equal(t, "abc123", compRes.syncStatus.Revision)
ctrl.repoClientset.(*mockrepoclient.Clientset).RepoServerServiceClient.(*mockrepoclient.RepoServerServiceClient).AssertNumberOfCalls(t, "UpdateRevisionForPaths", 1)
}
func TestSetHealth(t *testing.T) {
@@ -1528,6 +1529,10 @@ func TestUseDiffCache(t *testing.T) {
t.Fatalf("error merging app: %s", err)
}
}
if app.Spec.Destination.Name != "" && app.Spec.Destination.Server != "" {
// Simulate the controller's process for populating both of these fields.
app.Spec.Destination.SetInferredServer(app.Spec.Destination.Server)
}
return app
}

View File

@@ -80,12 +80,7 @@ func (m *appStateManager) getResourceOperations(server string) (kube.ResourceOpe
if err != nil {
return nil, nil, fmt.Errorf("error getting cluster: %w", err)
}
rawConfig, err := cluster.RawRestConfig()
if err != nil {
return nil, nil, fmt.Errorf("error getting cluster REST config: %w", err)
}
ops, cleanup, err := m.kubectl.ManageResources(rawConfig, clusterCache.GetOpenAPISchema())
ops, cleanup, err := m.kubectl.ManageResources(cluster.RawRestConfig(), clusterCache.GetOpenAPISchema())
if err != nil {
return nil, nil, fmt.Errorf("error creating kubectl ResourceOperations: %w", err)
}
@@ -228,20 +223,8 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
return
}
rawConfig, err := clst.RawRestConfig()
if err != nil {
state.Phase = common.OperationError
state.Message = err.Error()
return
}
clusterRESTConfig, err := clst.RESTConfig()
if err != nil {
state.Phase = common.OperationError
state.Message = err.Error()
return
}
restConfig := metrics.AddMetricsTransportWrapper(m.metricsServer, app, clusterRESTConfig)
rawConfig := clst.RawRestConfig()
restConfig := metrics.AddMetricsTransportWrapper(m.metricsServer, app, clst.RESTConfig())
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
if err != nil {
@@ -378,7 +361,6 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
sync.WithReplace(syncOp.SyncOptions.HasOption(common.SyncOptionReplace)),
sync.WithServerSideApply(syncOp.SyncOptions.HasOption(common.SyncOptionServerSideApply)),
sync.WithServerSideApplyManager(cdcommon.ArgoCDSSAManager),
sync.WithPruneConfirmed(app.IsDeletionConfirmed(state.StartedAt.Time)),
}
if syncOp.SyncOptions.HasOption("CreateNamespace=true") {
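The `AddMetricsTransportWrapper` call in the hunk above wraps the cluster's REST config so API round-trips can be measured. The general client-go mechanism for this is wrapping the config's transport; a minimal sketch of instrumenting requests that way (illustrative only, not Argo CD's metrics code; `roundTripperFunc` is a local helper defined below):

```go
package example

import (
	"log"
	"net/http"
	"time"

	"k8s.io/client-go/rest"
)

// withRequestTiming decorates a REST config so every Kubernetes API request
// is timed and logged. Real code would feed a metrics registry instead.
func withRequestTiming(cfg *rest.Config) *rest.Config {
	cfg = rest.CopyConfig(cfg)
	cfg.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
			start := time.Now()
			resp, err := rt.RoundTrip(req)
			log.Printf("%s %s took %s", req.Method, req.URL.Path, time.Since(start))
			return resp, err
		})
	})
	return cfg
}

type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }
```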

Binary image files changed (not shown): one image removed (62 KiB before) and one image replaced (163 KiB before, 117 KiB after).

View File

@@ -4,8 +4,6 @@ const observerOptions = {
subtree: true
};
const VERSION_REGEX = /\/en\/(release-(?:v\d+|[\d\.]+|\w+)|latest|stable)\//;
const observerCallback = function(mutationsList, observer) {
for (let mutation of mutationsList) {
if (mutation.type === 'childList') {
@@ -22,7 +20,7 @@ const observer = new MutationObserver(observerCallback);
observer.observe(targetNode, observerOptions);
function getCurrentVersion() {
const currentVersion = window.location.href.match(VERSION_REGEX);
const currentVersion = window.location.href.match(/\/en\/(release-(?:v\d+|[\d\.]+|\w+)|latest|stable)\//);
if (currentVersion && currentVersion.length > 1) {
return currentVersion[1];
}
@@ -82,16 +80,11 @@ function sortVersionLinks(container) {
const dlElements = container.querySelectorAll('dl');
dlElements.forEach(dl => {
const ddElements = Array.from(dl.querySelectorAll('dd'));
const dt = dl.querySelector('dt');
if (dt && dt.textContent.trim().toLowerCase() === 'versions') {
// Found the Versions <dl>
const ddElements = Array.from(dl.querySelectorAll('dd'));
// Check if ddElements contain version links
const isVersionDl = ddElements.some(dd => {
const link = dd.querySelector('a');
return VERSION_REGEX.test(link?.getAttribute?.('href'));
});
// This dl contains version links, proceed to sort
if (isVersionDl) {
// Define sorting criteria
ddElements.sort((a, b) => {
const aText = a.textContent.trim().toLowerCase();
@@ -110,7 +103,7 @@ function sortVersionLinks(container) {
if (aVersionMatch && bVersionMatch) {
const aVersion = aVersionMatch[1].split('.').map(Number);
const bVersion = bVersionMatch[1].split('.').map(Number);
for (let i = 0; i < Math.max(aVersion.length, bVersion.length); i++) {
const aNum = aVersion[i] || 0;
const bNum = bVersion[i] || 0;
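The sort above orders documentation version links by comparing dotted version numbers component by component. The same idea expressed in Go (an illustrative sketch, not part of the docs script):

```go
package example

import (
	"strconv"
	"strings"
)

// compareDottedVersions returns a negative, zero, or positive number when a is
// older than, equal to, or newer than b (e.g. "2.10" sorts after "2.9").
func compareDottedVersions(a, b string) int {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	n := len(as)
	if len(bs) > n {
		n = len(bs)
	}
	for i := 0; i < n; i++ {
		var ai, bi int
		if i < len(as) {
			ai, _ = strconv.Atoi(as[i])
		}
		if i < len(bs) {
			bi, _ = strconv.Atoi(bs[i])
		}
		if ai != bi {
			return ai - bi
		}
	}
	return 0
}
```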

View File

@@ -7,7 +7,7 @@ enforced.
## Logical layers
The diagram below suggests 4 different logical layers (represented by
The diagram bellow suggests 4 different logical layers (represented by
4 boxes: HTTP, gRPC, AuthN and AuthZ) inside Argo CD API server that
collaborate to provide authentication and authorization.

View File

@@ -7,59 +7,24 @@ and the [toolchain guide](toolchain-guide.md).
## Getting Started
### Prerequisites
Before starting, ensure you have the following tools installed with the specified minimum versions:
* Git (v2.0.0+)
* Go (version specified in `go.mod` - check with `go version`)
* Docker (v20.10.0+) Or Podman (v3.0.0+)
* Kind (v0.11.0+) Or Minikube (v1.23.0+)
* Yarn (v1.22.0+)
* Goreman (latest version)
### Fork and Clone the Repository
1. Fork the Argo CD repository to your personal Github Account
2. Clone the forked repository:
```shell
mkdir -p $GOPATH/src/github.com/argoproj/
cd $GOPATH/src/github.com/argoproj/
git clone https://github.com/YOUR-USERNAME/argo-cd.git
```
3. Add the upstream remote for rebasing:
```shell
cd argo-cd
git remote add upstream https://github.com/argoproj/argo-cd.git
```
### Install Required Tools
1. Install development tools:
```shell
make install-go-tools-local
make install-code-gen-tools-local
```
### Install Go
<https://go.dev/doc/install/>
Install Go with a version equal to or greater than the version listed in `go.mod` (verify go version with `go version`).
### Clone the Argo CD repo
### Install Docker or Podman
```shell
mkdir -p $GOPATH/src/github.com/argoproj/ &&
cd $GOPATH/src/github.com/argoproj &&
git clone https://github.com/argoproj/argo-cd.git
```
#### Installation guide for docker:
### Install Docker
<https://docs.docker.com/engine/install/>
#### Installation guide for podman:
<https://podman.io/docs/installation>
### Install or Upgrade a Tool for Running Local Clusters (e.g. kind or minikube)
#### Installation guide for kind:
@@ -83,12 +48,6 @@ Or, if you are using minikube:
minikube start
```
Or, if you are using minikube with podman driver:
```shell
minikube start --driver=podman
```
### Install Argo CD
```shell
@@ -118,13 +77,6 @@ cd argo-cd
make start-local ARGOCD_GPG_ENABLED=false
```
By default, Argo CD uses Docker. To use Podman instead, set the `DOCKER` environment variable to `podman` before running the `make` command:
```shell
cd argo-cd
DOCKER=podman make start-local ARGOCD_GPG_ENABLED=false
```
- Navigate to [localhost:4000](http://localhost:4000) in your browser to load the Argo CD UI
- It may take a few minutes for the UI to be responsive
@@ -132,40 +84,8 @@ DOCKER=podman make start-local ARGOCD_GPG_ENABLED=false
If the UI is not working, check the logs from `make start-local`. The logs are `DEBUG` level by default. If the logs are
too noisy to find the problem, try editing log levels for the commands in the `Procfile` in the root of the Argo CD repo.
## Common Make Targets
Here are some frequently used make targets:
* `make start-local` - Start Argo CD locally
* `make test` - Run unit tests
* `make test-e2e` - Run end-to-end tests
* `make lint` - Run linting
* `make serve-docs` - Serve documentation locally
* `make pre-commit-local` - Run pre-commit checks locally
* `make build` - Build Argo CD binaries
## Making Changes
### Before Submitting a PR
1. Rebase your branch against upstream main:
```shell
git fetch upstream
git rebase upstream/main
```
2. Run pre-commit checks:
```shell
make pre-commit-local
```
### Docs Changes
Modifying the docs auto-reloads the changes on the [documentation website](https://argo-cd.readthedocs.io/) that can be locally built using `make serve-docs` command.
Once running, you can view your locally built documentation on port 8000.
Read more about this [here](https://argo-cd.readthedocs.io/en/latest/developer-guide/docs-site/).
### UI Changes
Modifying the User-Interface (by editing .tsx or .scss files) auto-reloads the changes on port 4000.

View File

@@ -17,25 +17,6 @@ Before submitting a PR build the website, to verify that there are no errors bui
make build-docs
```
If you want to build and test the site directly on your local machine without the use of docker container, follow the below steps:
1. Install the `mkdocs` using the `pip` command
```bash
pip install mkdocs
```
2. Install the required dependencies using the below command
```bash
pip install $(mkdocs get-deps)
```
3. Build the docs site locally from the root
```bash
make build-docs-local
```
4. Start the docs site locally
```bash
make serve-docs-local
```
## Analytics
!!! tip

Some files were not shown because too many files have changed in this diff.