Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-21 01:58:46 +01:00.

Comparing commits: `source-hyd` ... `v2.14.19` (139 commits).
Commits, by SHA1:

```
ba5b938858  4a133ce41b  376525eea2  9fa9bb8c89  4359b3c774  4f6686fc3f
caa4dc1bd2  981e7f762a  3d76aa5daa  fca3a7ed2a  2bb617d02a  72e2387795
f8eba3e6e9  510b77546e  d77ecdf113  f9bb3b608e  5d0a4f0dd3  8a3b2fdd2b
ddb6073e52  d95b710055  6c7d6940cd  ec5198949e  da2ef7db67  c6166fd531
3c68b26d7a  5f890629a9  e24ee58f28  14fa0e0d9f  2aceb1dc44  a2361bf850
5ad281ef56  24d57220ca  d213c305e4  25e327bb9a  efe5d2956e  5bc6f4722b
25235fbc2d  3a9ab77537  ced6a7822e  fe93963970  78e61ba71c  f7ad2adf50
b163de0784  82831155c2  91f54459d4  0451723be1  f6f7d29c11  5feb8c21f2
4826fb0ab8  3b308d66e2  b31d700188  be81419f27  6b15a04509  38985bdcd6
c868711d03  31a554568a  a7178be1c1  9a9e62d392  9f832cd099  ec45e33800
872319e8e7  7acdaa96e0  d107d4e41a  39407827d3  fe2a6e91b6  38c03769af
defd4be943  f463a945d5  ed242b9eee  3d901f2037  2b1e829986  52231dbc09
2eab10a3cb  962d7a9ad9  54170a4fd8  71fd4e501d  cb1df5d35f  66db4b6876
3adb83c1df  63edc3eb9c  2dd70dede8  d79185a4fe  8f925c6754  b5be1df890
92a3c3d727  aaed35c6d4  2b422d2c70  896a461ae6  ad2724661b  efd9c32e25
3345d05a43  4745e08d4f  46f494592c  5964abd6af  d59c85c5eb  e4599e1a90
67b2336cac  8a8fc37f3c  2ef67d3e5c  479b182552  bb8185e2ec  70ea86523e
35174dc196  bab2c41e10  bd755104ed  2bf5dc6ed1  ebf754e3ab  97704acded
51471b3b8b  c13c9c1be3  a4c1bffbea  e2eb655e41  0a26e0f465  90146498fe
018014c4b0  a89d01266b  684ee0bceb  2ac03b5152  9203dd16af  0de5f60cdc
1a69663a70  433b317c35  dc3f40c31e  c090f849b0  a94a07ecd6  065700c5e1
8d4ae26686  8a9de6a8d3  4d17bf3d8b  75b0b3c8ee  bce16e9daf  e878ad5f31
19eaeb9aca  5cdb1a0a15  4471603de2  99efafb55a  fdf539dc6a  22fe65b4eb
b60d28c71a
```
**`.github/workflows/ci-build.yaml`** (25 changed lines, vendored)

```diff
@@ -14,7 +14,7 @@ on:
 env:
   # Golang version to use across CI steps
   # renovate: datasource=golang-version packageName=golang
-  GOLANG_VERSION: '1.23.3'
+  GOLANG_VERSION: '1.24.6'
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -109,10 +109,10 @@ jobs:
         with:
           go-version: ${{ env.GOLANG_VERSION }}
       - name: Run golangci-lint
-        uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
+        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
         with:
           # renovate: datasource=go packageName=github.com/golangci/golangci-lint versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
-          version: v1.62.2
+          version: v2.1.6
           args: --verbose
 
   test-go:
@@ -370,11 +370,11 @@ jobs:
           path: test-results
       - name: combine-go-coverage
         # We generate coverage reports for all Argo CD components, but only the applicationset-controller,
-        # app-controller, and repo-server report contain coverage data. The other components currently don't shut down
-        # gracefully, so no coverage data is produced. Once those components are fixed, we can add references to their
-        # coverage output directories.
+        # app-controller, repo-server, and commit-server report contain coverage data. The other components currently
+        # don't shut down gracefully, so no coverage data is produced. Once those components are fixed, we can add
+        # references to their coverage output directories.
         run: |
-          go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller -o test-results/full-coverage.out
+          go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller,e2e-code-coverage/commit-server -o test-results/full-coverage.out
       - name: Upload code coverage information to codecov.io
         uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0
         with:
@@ -393,7 +393,7 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-        uses: SonarSource/sonarqube-scan-action@1b442ee39ac3fa7c2acdd410208dcb2bcfaae6c4 # v4.1.0
+        uses: SonarSource/sonarqube-scan-action@bfd4e558cda28cda6b5defafb9232d191be8c203 # v4.2.1
         if: env.sonar_secret != ''
   test-e2e:
     name: Run end-to-end tests
@@ -429,6 +429,13 @@ jobs:
       GITHUB_TOKEN: ${{ secrets.E2E_TEST_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
       GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
     steps:
+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be
+        with:
+          large-packages: false
+          docker-images: false
+          swap-storage: false
+          tool-cache: false
      - name: Checkout code
        uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
      - name: Setup Golang
@@ -542,4 +549,4 @@ jobs:
           exit 0
         else
           exit 1
         fi
       fi
```
**`.github/workflows/image-reuse.yaml`** (2 changed lines, vendored)

```diff
@@ -17,11 +17,9 @@ on:
       platforms:
         required: true
         type: string
-        default: linux/amd64
       push:
         required: true
         type: boolean
-        default: false
       target:
         required: false
         type: string
```
**`.github/workflows/image.yaml`** (4 changed lines, vendored)

```diff
@@ -53,7 +53,7 @@ jobs:
     with:
       # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
       # renovate: datasource=golang-version packageName=golang
-      go-version: 1.23.3
+      go-version: 1.24.6
       platforms: ${{ needs.set-vars.outputs.platforms }}
       push: false
 
@@ -70,7 +70,7 @@ jobs:
       ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
       # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
       # renovate: datasource=golang-version packageName=golang
-      go-version: 1.23.3
+      go-version: 1.24.6
       platforms: ${{ needs.set-vars.outputs.platforms }}
       push: true
     secrets:
```
**`.github/workflows/release.yaml`** (72 changed lines, vendored)

```diff
@@ -11,7 +11,7 @@ permissions: {}
 
 env:
   # renovate: datasource=golang-version packageName=golang
-  GOLANG_VERSION: '1.23.3' # Note: go-version must also be set in job argocd-image.with.go-version
+  GOLANG_VERSION: '1.24.6' # Note: go-version must also be set in job argocd-image.with.go-version
 
 jobs:
   argocd-image:
@@ -25,13 +25,49 @@ jobs:
       quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
       # Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
       # renovate: datasource=golang-version packageName=golang
-      go-version: 1.23.3
+      go-version: 1.24.6
       platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
       push: true
     secrets:
       quay_username: ${{ secrets.RELEASE_QUAY_USERNAME }}
       quay_password: ${{ secrets.RELEASE_QUAY_TOKEN }}
 
+  setup-variables:
+    name: Setup Release Variables
+    if: github.repository == 'argoproj/argo-cd'
+    runs-on: ubuntu-22.04
+    outputs:
+      is_pre_release: ${{ steps.var.outputs.is_pre_release }}
+      is_latest_release: ${{ steps.var.outputs.is_latest_release }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
+        with:
+          fetch-depth: 0
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Setup variables
+        id: var
+        run: |
+          set -xue
+          # Fetch all tag information
+          git fetch --prune --tags --force
+
+          LATEST_RELEASE_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | grep -v '-' | tail -n1)
+
+          PRE_RELEASE=false
+          # Check if latest tag is a pre-release
+          if echo ${{ github.ref_name }} | grep -E -- '-rc[0-9]+$';then
+            PRE_RELEASE=true
+          fi
+
+          IS_LATEST=false
+          # Ensure latest release tag matches github.ref_name
+          if [[ $LATEST_RELEASE_TAG == ${{ github.ref_name }} ]];then
+            IS_LATEST=true
+          fi
+          echo "is_pre_release=$PRE_RELEASE" >> $GITHUB_OUTPUT
+          echo "is_latest_release=$IS_LATEST" >> $GITHUB_OUTPUT
+
   argocd-image-provenance:
     needs: [argocd-image]
     permissions:
@@ -50,15 +86,17 @@ jobs:
 
   goreleaser:
     needs:
+      - setup-variables
       - argocd-image
       - argocd-image-provenance
     permissions:
       contents: write # used for uploading assets
     if: github.repository == 'argoproj/argo-cd'
     runs-on: ubuntu-22.04
+    env:
+      GORELEASER_MAKE_LATEST: ${{ needs.setup-variables.outputs.is_latest_release }}
     outputs:
       hashes: ${{ steps.hash.outputs.hashes }}
 
     steps:
       - name: Checkout code
         uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
@@ -140,7 +178,7 @@ jobs:
     permissions:
       contents: write # Needed for release uploads
     outputs:
-      hashes: ${{ steps.sbom-hash.outputs.hashes}}
+      hashes: ${{ steps.sbom-hash.outputs.hashes }}
     if: github.repository == 'argoproj/argo-cd'
     runs-on: ubuntu-22.04
     steps:
@@ -195,7 +233,7 @@ jobs:
           echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
 
       - name: Upload SBOM
-        uses: softprops/action-gh-release@7b4da11513bf3f43f9999e90eabced41ab8bb048 # v2.2.0
+        uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
@@ -218,6 +256,7 @@ jobs:
 
   post-release:
     needs:
+      - setup-variables
       - argocd-image
       - goreleaser
       - generate-sbom
@@ -226,6 +265,8 @@ jobs:
       pull-requests: write # Needed to create PR for VERSION update.
     if: github.repository == 'argoproj/argo-cd'
     runs-on: ubuntu-22.04
+    env:
+      TAG_STABLE: ${{ needs.setup-variables.outputs.is_latest_release }}
     steps:
       - name: Checkout code
         uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
@@ -239,27 +280,6 @@ jobs:
           git config --global user.email 'ci@argoproj.com'
           git config --global user.name 'CI'
 
-      - name: Check if tag is the latest version and not a pre-release
-        run: |
-          set -xue
-          # Fetch all tag information
-          git fetch --prune --tags --force
-
-          LATEST_TAG=$(git -c 'versionsort.suffix=-rc' tag --list --sort=version:refname | tail -n1)
-
-          PRE_RELEASE=false
-          # Check if latest tag is a pre-release
-          if echo $LATEST_TAG | grep -E -- '-rc[0-9]+$';then
-            PRE_RELEASE=true
-          fi
-
-          # Ensure latest tag matches github.ref_name & not a pre-release
-          if [[ $LATEST_TAG == ${{ github.ref_name }} ]] && [[ $PRE_RELEASE != 'true' ]];then
-            echo "TAG_STABLE=true" >> $GITHUB_ENV
-          else
-            echo "TAG_STABLE=false" >> $GITHUB_ENV
-          fi
-
       - name: Update stable tag to latest version
         run: |
           git tag -f stable ${{ github.ref_name }}
```
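The new `setup-variables` job condenses the release rules into two outputs: a tag is a pre-release when it ends in `-rcN`, and it is the latest release only when it equals the highest non-rc tag from `git -c 'versionsort.suffix=-rc' tag --sort=version:refname`. As an illustration only — this Go program is not part of the repository — the same classification rule:

```go
package main

import (
	"fmt"
	"regexp"
)

// rcSuffix mirrors the workflow's grep pattern '-rc[0-9]+$'.
var rcSuffix = regexp.MustCompile(`-rc[0-9]+$`)

// classify reproduces the two workflow outputs: is_pre_release and
// is_latest_release. latestReleaseTag stands in for the highest non-rc tag
// that the shell step extracts via git tag --sort=version:refname.
func classify(refName, latestReleaseTag string) (isPreRelease, isLatest bool) {
	isPreRelease = rcSuffix.MatchString(refName)
	isLatest = refName == latestReleaseTag
	return isPreRelease, isLatest
}

func main() {
	pre, latest := classify("v2.14.19-rc1", "v2.14.19")
	fmt.Println(pre, latest) // true false
	pre, latest = classify("v2.14.19", "v2.14.19")
	fmt.Println(pre, latest) // false true
}
```

The `post-release` job then consumes the same `is_latest_release` output through its `TAG_STABLE` env variable, replacing the inline shell check it previously duplicated.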
**`.golangci.yaml`** (130 changed lines)

```diff
@@ -1,20 +1,12 @@
-issues:
-  exclude:
-    - SA5011
-  max-issues-per-linter: 0
-  max-same-issues: 0
-  exclude-rules:
-    - path: '(.+)_test\.go'
-      linters:
-        - unparam
+version: "2"
+run:
+  timeout: 50m
 linters:
+  default: none
   enable:
     - errcheck
     - errorlint
     - gocritic
-    - gofumpt
-    - goimports
-    - gosimple
     - govet
     - ineffassign
     - misspell
@@ -25,35 +17,85 @@ linters:
     - unparam
     - unused
     - usestdlibvars
     - whitespace
-linters-settings:
-  gocritic:
-    disabled-checks:
-      - appendAssign
-      - assignOp # Keep it disabled for readability
-      - badCond
-      - commentFormatting
-      - exitAfterDefer
-      - ifElseChain
-      - mapKey
-      - singleCaseSwitch
-      - typeSwitchVar
-  goimports:
-    local-prefixes: github.com/argoproj/argo-cd/v2
-  perfsprint:
-    # Optimizes even if it requires an int or uint type cast.
-    int-conversion: true
-    # Optimizes into `err.Error()` even if it is only equivalent for non-nil errors.
-    err-error: false
-    # Optimizes `fmt.Errorf`.
-    errorf: false
-    # Optimizes `fmt.Sprintf` with only one argument.
-    sprintf1: true
-    # Optimizes into strings concatenation.
-    strconcat: false
-  testifylint:
-    enable-all: true
-    disable:
-      - go-require
-run:
-  timeout: 50m
+  settings:
+    gocritic:
+      disabled-checks:
+        - appendAssign
+        - assignOp
+        - badCond
+        - commentFormatting
+        - exitAfterDefer
+        - ifElseChain
+        - mapKey
+        - singleCaseSwitch
+        - typeSwitchVar
+    perfsprint:
+      int-conversion: true
+      err-error: false
+      errorf: false
+      sprintf1: true
+      strconcat: false
+    staticcheck:
+      checks:
+        - "all"
+        - "-ST1001" # dot imports are discouraged
+        - "-ST1003" # poorly chosen identifier
+        - "-ST1005" # incorrectly formatted error string
+        - "-ST1011" # poorly chosen name for variable of type time.Duration
+        - "-ST1012" # poorly chosen name for an error variable
+        - "-ST1016" # use consistent method receiver names
+        - "-ST1017" # don't use Yoda conditions
+        - "-ST1019" # importing the same package multiple times
+        - "-ST1023" # redundant type in variable declaration
+        - "-QF1001" # apply De Morgan’s law
+        - "-QF1003" # convert if/else-if chain to tagged switch
+        - "-QF1006" # lift if+break into loop condition
+        - "-QF1007" # merge conditional assignment into variable declaration
+        - "-QF1008" # omit embedded fields from selector expression
+        - "-QF1009" # use time.Time.Equal instead of == operator
+        - "-QF1011" # omit redundant type from variable declaration
+        - "-QF1012" # use fmt.Fprintf(x, ...) instead of x.Write(fmt.Sprintf(...))
+    testifylint:
+      enable-all: true
+      disable:
+        - go-require
+        - equal-values
+        - empty
+        - len
+        - expected-actual
+        - formatter
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    rules:
+      - linters:
+          - unparam
+        path: (.+)_test\.go
+      - path: (.+)\.go$
+        text: SA5011
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/argoproj/argo-cd/v2
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
```
**GoReleaser configuration**

```diff
@@ -46,16 +46,17 @@ builds:
 archives:
   - id: argocd-archive
     builds:
-    - argocd-cli
+      - argocd-cli
     name_template: |-
       {{ .ProjectName }}-{{ .Os }}-{{ .Arch }}
-    format: binary
+    formats: [binary]
 
 checksum:
   name_template: 'cli_checksums.txt'
   algorithm: sha256
 
 release:
+  make_latest: '{{ .Env.GORELEASER_MAKE_LATEST }}'
   prerelease: auto
   draft: false
   header: |
@@ -85,16 +86,14 @@ release:
     If upgrading from a different minor version, be sure to read the [upgrading](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) documentation.
   footer: |
     **Full Changelog**: https://github.com/argoproj/argo-cd/compare/{{ .PreviousTag }}...{{ .Tag }}
 
     <a href="https://argoproj.github.io/cd/"><img src="https://raw.githubusercontent.com/argoproj/argo-site/master/content/pages/cd/gitops-cd.png" width="25%" ></a>
 
 snapshot: #### To be removed for PR
-  name_template: "2.6.0"
+  name_template: '2.6.0'
 
 changelog:
-  use:
-    github
+  use: github
   sort: asc
   abbrev: 0
   groups: # Regex use RE2 syntax as defined here: https://github.com/google/re2/wiki/Syntax.
@@ -117,7 +116,4 @@ changelog:
     - '^test:'
     - '^.*?Bump(\([[:word:]]+\))?.+$'
     - '^.*?\[Bot\](\([[:word:]]+\))?.+$'
-
-
-# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
```
**Mockery mocks configuration**

```diff
@@ -26,6 +26,13 @@ packages:
   github.com/argoproj/argo-cd/v2/applicationset/utils:
     interfaces:
       Renderer:
+  github.com/argoproj/argo-cd/v2/commitserver/commit:
+    interfaces:
+      RepoClientFactory:
+  github.com/argoproj/argo-cd/v2/commitserver/apiclient:
+    interfaces:
+      CommitServiceClient:
+      Clientset:
   github.com/argoproj/argo-cd/v2/controller/cache:
     interfaces:
       LiveStateCache:
@@ -67,3 +74,6 @@ packages:
   github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster:
     interfaces:
       ClusterServiceServer:
+  github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1:
+    interfaces:
+      AppProjectInterface:
```
**Read the Docs configuration**

```diff
@@ -2,6 +2,7 @@ version: 2
 formats: all
 mkdocs:
+  fail_on_warning: false
   configuration: mkdocs.yml
 python:
   install:
     - requirements: docs/requirements.txt
```
**Dockerfile**

```diff
@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:3f85b7caad41a95462cf5b787d8
 # Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
 # Also used as the image in CI jobs so needs all dependencies
 ####################################################################################################
-FROM docker.io/library/golang:1.23.3@sha256:d56c3e08fe5b27729ee3834854ae8f7015af48fd651cd25d1e3bcf3c19830174 AS builder
+FROM docker.io/library/golang:1.24.6@sha256:2c89c41fb9efc3807029b59af69645867cfe978d2b877d475be0d72f6c6ce6f6 AS builder
 
 RUN echo 'deb http://archive.debian.org/debian buster-backports main' >> /etc/apt/sources.list
 
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
 ####################################################################################################
 # Argo CD Build stage which performs the actual build of Argo CD binaries
 ####################################################################################################
-FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.23.3@sha256:d56c3e08fe5b27729ee3834854ae8f7015af48fd651cd25d1e3bcf3c19830174 AS argocd-build
+FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.24.6@sha256:2c89c41fb9efc3807029b59af69645867cfe978d2b877d475be0d72f6c6ce6f6 AS argocd-build
 
 WORKDIR /go/src/github.com/argoproj/argo-cd
 
@@ -140,7 +140,8 @@ RUN ln -s /usr/local/bin/argocd /usr/local/bin/argocd-server && \
     ln -s /usr/local/bin/argocd /usr/local/bin/argocd-dex && \
     ln -s /usr/local/bin/argocd /usr/local/bin/argocd-notifications && \
     ln -s /usr/local/bin/argocd /usr/local/bin/argocd-applicationset-controller && \
-    ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth
+    ln -s /usr/local/bin/argocd /usr/local/bin/argocd-k8s-auth && \
+    ln -s /usr/local/bin/argocd /usr/local/bin/argocd-commit-server
 
 USER $ARGOCD_USER_ID
 ENTRYPOINT ["/usr/bin/tini", "--"]
```
**`Makefile`** (2 changed lines)

```diff
@@ -472,6 +472,7 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
 	mkdir -p /tmp/coverage/repo-server
 	mkdir -p /tmp/coverage/applicationset-controller
 	mkdir -p /tmp/coverage/notification
+	mkdir -p /tmp/coverage/commit-server
 	# set paths for locally managed ssh known hosts and tls certs data
 	ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
 	ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
@@ -489,6 +490,7 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
 	ARGOCD_APPLICATIONSET_CONTROLLER_TOKENREF_STRICT_MODE=true \
 	ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS=http://127.0.0.1:8341,http://127.0.0.1:8342,http://127.0.0.1:8343,http://127.0.0.1:8344 \
 	ARGOCD_E2E_TEST=true \
+	ARGOCD_HYDRATOR_ENABLED=true \
 	goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}
 	ls -lrt /tmp/coverage
```
**`Procfile`** (5 changed lines)

```diff
@@ -1,9 +1,10 @@
-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/app-controller} HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'}"
-api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''}"
+controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/app-controller} HOSTNAME=testappcontroller-1 FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-application-controller $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --commit-server localhost:${ARGOCD_E2E_COMMITSERVER_PORT:-8086} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --server-side-diff-enabled=${ARGOCD_APPLICATION_CONTROLLER_SERVER_SIDE_DIFF:-'false'} --hydrator-enabled=${ARGOCD_HYDRATOR_ENABLED:='false'}"
+api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --hydrator-enabled=${ARGOCD_HYDRATOR_ENABLED:='false'}"
 dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v2/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
 redis: hack/start-redis-with-password.sh
 repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
 cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
+commit-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/commit-server} FORCE_LOG_COLORS=1 ARGOCD_BINARY_NAME=argocd-commit-server $COMMAND --loglevel debug --port ${ARGOCD_E2E_COMMITSERVER_PORT:-8086}"
 ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
 git-server: test/fixture/testrepos/start-git.sh
 helm-registry: test/fixture/testrepos/start-helm-registry.sh
```
**`USERS.md`** (1 changed line)

```diff
@@ -335,6 +335,7 @@ Currently, the following organizations are **officially** using Argo CD:
 1. [Swisscom](https://www.swisscom.ch)
 1. [Swissquote](https://github.com/swissquote)
 1. [Syncier](https://syncier.com/)
+1. [Synergy](https://synergy.net.au)
 1. [Syself](https://syself.com)
 1. [TableCheck](https://tablecheck.com/)
 1. [Tailor Brands](https://www.tailorbrands.com)
```
**ApplicationSet controller (Go)**

```diff
@@ -91,6 +91,7 @@ type ApplicationSetReconciler struct {
 	GlobalPreservedAnnotations []string
 	GlobalPreservedLabels      []string
 	Metrics                    *metrics.ApplicationsetMetrics
+	MaxResourcesStatusCount    int
 }
 
 // +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
@@ -127,8 +128,8 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}()
 
 	// Do not attempt to further reconcile the ApplicationSet if it is being deleted.
-	if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil {
-		appsetName := applicationSetInfo.ObjectMeta.Name
+	if applicationSetInfo.DeletionTimestamp != nil {
+		appsetName := applicationSetInfo.Name
 		logCtx.Debugf("DeletionTimestamp is set on %s", appsetName)
 		deleteAllowed := utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete()
 		if !deleteAllowed {
@@ -155,6 +156,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	// desiredApplications is the main list of all expected Applications from all generators in this appset.
 	desiredApplications, applicationSetReason, err := template.GenerateApplications(logCtx, applicationSetInfo, r.Generators, r.Renderer, r.Client)
 	if err != nil {
+		logCtx.Errorf("unable to generate applications: %v", err)
 		_ = r.setApplicationSetStatusCondition(ctx,
 			&applicationSetInfo,
 			argov1alpha1.ApplicationSetCondition{
@@ -164,7 +166,8 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 				Status: argov1alpha1.ApplicationSetConditionStatusTrue,
 			}, parametersGenerated,
 		)
-		return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, err
+		// In order for the controller SDK to respect RequeueAfter, the error must be nil
+		return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
 	}
 
 	parametersGenerated = true
```
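The swapped return value in the hunk above reflects controller-runtime's contract: when `Reconcile` returns a non-nil error, the result is discarded and the request is requeued with rate-limited backoff, so `RequeueAfter` only takes effect alongside a nil error. A minimal sketch of that pattern (the reconciler type and its `generate` helper are hypothetical, not Argo CD code):

```go
package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// exampleReconciler is a hypothetical stand-in for ApplicationSetReconciler.
type exampleReconciler struct{}

func (r *exampleReconciler) generate(ctx context.Context, req ctrl.Request) error {
	return nil // placeholder for generator work
}

// Reconcile illustrates the rule: controller-runtime honors Result.RequeueAfter
// only when the returned error is nil; a non-nil error routes the request
// through the rate-limited error backoff instead.
func (r *exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	if err := r.generate(ctx, req); err != nil {
		// Record the failure in status/conditions elsewhere, then requeue on a
		// fixed schedule by pairing RequeueAfter with a nil error.
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}
	return ctrl.Result{}, nil
}
```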
```diff
@@ -208,16 +211,16 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	appSyncMap := map[string]bool{}
 
 	if r.EnableProgressiveSyncs {
-		if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
-			// If appset used progressive sync but stopped, clean up the progressive sync application statuses
+		if !isRollingSyncStrategy(&applicationSetInfo) && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
+			// If an appset was previously syncing with a `RollingSync` strategy but it has switched to the default strategy, clean up the progressive sync application statuses
 			logCtx.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
 
 			err := r.setAppSetApplicationStatus(ctx, logCtx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
 			if err != nil {
 				return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
 			}
-		} else if applicationSetInfo.Spec.Strategy != nil {
-			// appset uses progressive sync
+		} else if isRollingSyncStrategy(&applicationSetInfo) {
+			// The appset uses progressive sync with `RollingSync` strategy
 			for _, app := range currentApplications {
 				appMap[app.Name] = app
 			}
@@ -312,7 +315,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 
 	if applicationSetInfo.RefreshRequired() {
 		delete(applicationSetInfo.Annotations, common.AnnotationApplicationSetRefresh)
-		err := r.Client.Update(ctx, &applicationSetInfo)
+		err := r.Update(ctx, &applicationSetInfo)
 		if err != nil {
 			logCtx.Warnf("error occurred while updating ApplicationSet: %v", err)
 			_ = r.setApplicationSetStatusCondition(ctx,
@@ -487,7 +490,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
 		}
 
 		appProject := &argov1alpha1.AppProject{}
-		err := r.Client.Get(ctx, types.NamespacedName{Name: app.Spec.Project, Namespace: r.ArgoCDNamespace}, appProject)
+		err := r.Get(ctx, types.NamespacedName{Name: app.Spec.Project, Namespace: r.ArgoCDNamespace}, appProject)
 		if err != nil {
 			if apierr.IsNotFound(err) {
 				errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
@@ -525,11 +528,9 @@ func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argov1
 }
 
 func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
-	return predicate.Funcs{
-		CreateFunc: func(e event.CreateEvent) bool {
-			return utils.IsNamespaceAllowed(namespaces, e.Object.GetNamespace())
-		},
-	}
+	return predicate.NewPredicateFuncs(func(object client.Object) bool {
+		return utils.IsNamespaceAllowed(namespaces, object.GetNamespace())
+	})
 }
 
 func appControllerIndexer(rawObj client.Object) []string {
@@ -553,12 +554,13 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
 		return fmt.Errorf("error setting up with manager: %w", err)
 	}
 
-	ownsHandler := getOwnsHandlerPredicates(enableProgressiveSyncs)
+	appOwnsHandler := getApplicationOwnsHandler(enableProgressiveSyncs)
+	appSetOwnsHandler := getApplicationSetOwnsHandler(enableProgressiveSyncs)
 
 	return ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{
 		MaxConcurrentReconciles: maxConcurrentReconciliations,
-	}).For(&argov1alpha1.ApplicationSet{}).
-		Owns(&argov1alpha1.Application{}, builder.WithPredicates(ownsHandler)).
+	}).For(&argov1alpha1.ApplicationSet{}, builder.WithPredicates(appSetOwnsHandler)).
+		Owns(&argov1alpha1.Application{}, builder.WithPredicates(appOwnsHandler)).
 		WithEventFilter(ignoreNotAllowedNamespaces(r.ApplicationSetNamespaces)).
 		Watches(
 			&corev1.Secret{},
@@ -566,7 +568,6 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
 			Client: mgr.GetClient(),
 			Log:    log.WithField("type", "createSecretEventHandler"),
 		}).
-		// TODO: also watch Applications and respond on changes if we own them.
 		Complete(r)
 }
 
@@ -625,7 +626,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
 		preservedAnnotations = append(preservedAnnotations, defaultPreservedAnnotations...)
 
 		for _, key := range preservedAnnotations {
-			if state, exists := found.ObjectMeta.Annotations[key]; exists {
+			if state, exists := found.Annotations[key]; exists {
 				if generatedApp.Annotations == nil {
 					generatedApp.Annotations = map[string]string{}
 				}
@@ -634,7 +635,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
 		}
 
 		for _, key := range preservedLabels {
-			if state, exists := found.ObjectMeta.Labels[key]; exists {
+			if state, exists := found.Labels[key]; exists {
 				if generatedApp.Labels == nil {
 					generatedApp.Labels = map[string]string{}
 				}
@@ -644,7 +645,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
 
 		// Preserve post-delete finalizers:
 		// https://github.com/argoproj/argo-cd/issues/17181
-		for _, finalizer := range found.ObjectMeta.Finalizers {
+		for _, finalizer := range found.Finalizers {
 			if strings.HasPrefix(finalizer, argov1alpha1.PostDeleteFinalizerName) {
 				if generatedApp.Finalizers == nil {
 					generatedApp.Finalizers = []string{}
@@ -653,10 +654,10 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
 			}
 		}
 
-		found.ObjectMeta.Annotations = generatedApp.Annotations
+		found.Annotations = generatedApp.Annotations
 
-		found.ObjectMeta.Finalizers = generatedApp.Finalizers
-		found.ObjectMeta.Labels = generatedApp.Labels
+		found.Finalizers = generatedApp.Finalizers
+		found.Labels = generatedApp.Labels
 
 		return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
 	})
@@ -710,7 +711,7 @@ func (r *ApplicationSetReconciler) createInCluster(ctx context.Context, logCtx *
 
 func (r *ApplicationSetReconciler) getCurrentApplications(ctx context.Context, applicationSet argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
 	var current argov1alpha1.ApplicationList
-	err := r.Client.List(ctx, &current, client.MatchingFields{".metadata.controller": applicationSet.Name}, client.InNamespace(applicationSet.Namespace))
+	err := r.List(ctx, &current, client.MatchingFields{".metadata.controller": applicationSet.Name}, client.InNamespace(applicationSet.Namespace))
 	if err != nil {
 		return nil, fmt.Errorf("error retrieving applications: %w", err)
 	}
@@ -721,9 +722,6 @@ func (r *ApplicationSetReconciler) getCurrentApplications(ctx context.Context, a
 // deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
 // The function must be called after all generators had been called and generated applications
 func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
-	// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
-	// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
-	// clusterList, err := argoDB.ListClusters(ctx)
 	clusterList, err := utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace)
 	if err != nil {
 		return fmt.Errorf("error listing clusters: %w", err)
@@ -758,7 +756,7 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *
 			continue
 		}
 
-		err = r.Client.Delete(ctx, &app)
+		err = r.Delete(ctx, &app)
 		if err != nil {
 			logCtx.WithError(err).Error("failed to delete Application")
 			if firstError != nil {
@@ -830,7 +828,7 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
 		if log.IsLevelEnabled(log.DebugLevel) {
 			utils.LogPatch(appLog, patch, updated)
 		}
-		if err := r.Client.Patch(ctx, updated, patch); err != nil {
+		if err := r.Patch(ctx, updated, patch); err != nil {
 			return fmt.Errorf("error updating finalizers: %w", err)
 		}
 		// Application must have updated list of finalizers
@@ -852,7 +850,7 @@ func (r *ApplicationSetReconciler) removeOwnerReferencesOnDeleteAppSet(ctx conte
 
 	for _, app := range applications {
 		app.SetOwnerReferences([]metav1.OwnerReference{})
-		err := r.Client.Update(ctx, &app)
+		err := r.Update(ctx, &app)
 		if err != nil {
 			return fmt.Errorf("error updating application: %w", err)
 		}
@@ -1008,8 +1006,14 @@ func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1al
 	return true
 }
 
+func isRollingSyncStrategy(appset *argov1alpha1.ApplicationSet) bool {
+	// It's only RollingSync if the type specifically sets it
+	return appset.Spec.Strategy != nil && appset.Spec.Strategy.Type == "RollingSync" && appset.Spec.Strategy.RollingSync != nil
+}
+
 func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.ApplicationSet) bool {
-	return appset.Spec.Strategy != nil && appset.Spec.Strategy.RollingSync != nil && appset.Spec.Strategy.Type == "RollingSync" && len(appset.Spec.Strategy.RollingSync.Steps) > 0
+	// ProgressiveSync is enabled if the strategy is set to `RollingSync` + steps slice is not empty
+	return isRollingSyncStrategy(appset) && len(appset.Spec.Strategy.RollingSync.Steps) > 0
 }
 
 func isApplicationHealthy(app argov1alpha1.Application) bool {
```
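The two helpers above intentionally differ: `isRollingSyncStrategy` checks only that the strategy type is `RollingSync`, while `progressiveSyncsRollingSyncStrategyEnabled` additionally requires a non-empty steps list. A sketch of a test pinning that distinction (the test itself is illustrative, not from this diff, and assumes it sits in the same package as the helpers):

```go
package controllers

import (
	"testing"

	"github.com/stretchr/testify/assert"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// Type "RollingSync" alone satisfies isRollingSyncStrategy, but progressive
// sync only engages once the RollingSync steps slice is non-empty.
func TestRollingSyncHelpersSketch(t *testing.T) {
	appset := &argov1alpha1.ApplicationSet{
		Spec: argov1alpha1.ApplicationSetSpec{
			Strategy: &argov1alpha1.ApplicationSetStrategy{
				Type:        "RollingSync",
				RollingSync: &argov1alpha1.ApplicationSetRolloutStrategy{},
			},
		},
	}
	assert.True(t, isRollingSyncStrategy(appset))
	assert.False(t, progressiveSyncsRollingSyncStrategyEnabled(appset)) // no steps yet

	appset.Spec.Strategy.RollingSync.Steps = []argov1alpha1.ApplicationSetRolloutStep{{}}
	assert.True(t, progressiveSyncsRollingSyncStrategyEnabled(appset))
}
```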
```diff
@@ -1062,19 +1066,20 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
 				Message:         "No Application status found, defaulting status to Waiting.",
 				Status:          "Waiting",
 				Step:            strconv.Itoa(getAppStep(app.Name, appStepMap)),
 				TargetRevisions: app.Status.GetRevisions(),
 			}
 		} else {
 			// we have an existing AppStatus
 			currentAppStatus = applicationSet.Status.ApplicationStatus[idx]
 
-			// upgrade any existing AppStatus that might have been set by an older argo-cd version
-			// note: currentAppStatus.TargetRevisions may be set to empty list earlier during migrations,
-			// to prevent other usage of r.Client.Status().Update to fail before reaching here.
-			if len(currentAppStatus.TargetRevisions) == 0 {
-				currentAppStatus.TargetRevisions = app.Status.GetRevisions()
+			if !reflect.DeepEqual(currentAppStatus.TargetRevisions, app.Status.GetRevisions()) {
+				currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
+				currentAppStatus.TargetRevisions = app.Status.GetRevisions()
+				currentAppStatus.Status = "Waiting"
+				currentAppStatus.LastTransitionTime = &now
+				currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
 			}
 		}
 
 		appOutdated := false
 		if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
@@ -1087,25 +1092,15 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
 			currentAppStatus.Status = "Waiting"
 			currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
 			currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
-			currentAppStatus.TargetRevisions = app.Status.GetRevisions()
 		}
 
 		if currentAppStatus.Status == "Pending" {
-			if operationPhaseString == "Succeeded" {
-				revisions := []string{}
-				if len(app.Status.OperationState.SyncResult.Revisions) > 0 {
-					revisions = app.Status.OperationState.SyncResult.Revisions
-				} else if app.Status.OperationState.SyncResult.Revision != "" {
-					revisions = append(revisions, app.Status.OperationState.SyncResult.Revision)
-				}
-
-				if reflect.DeepEqual(currentAppStatus.TargetRevisions, revisions) {
-					logCtx.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
-					currentAppStatus.LastTransitionTime = &now
-					currentAppStatus.Status = "Progressing"
-					currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
-					currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
-				}
+			if !appOutdated && operationPhaseString == "Succeeded" {
+				logCtx.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
+				currentAppStatus.LastTransitionTime = &now
+				currentAppStatus.Status = "Progressing"
+				currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
+				currentAppStatus.Step = strconv.Itoa(getAppStep(currentAppStatus.Application, appStepMap))
 			} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
 				logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
 				currentAppStatus.LastTransitionTime = &now
@@ -1320,6 +1315,11 @@ func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, lo
 	sort.Slice(statuses, func(i, j int) bool {
 		return statuses[i].Name < statuses[j].Name
 	})
+
+	if r.MaxResourcesStatusCount > 0 && len(statuses) > r.MaxResourcesStatusCount {
+		logCtx.Warnf("Truncating ApplicationSet %s resource status from %d to max allowed %d entries", appset.Name, len(statuses), r.MaxResourcesStatusCount)
+		statuses = statuses[:r.MaxResourcesStatusCount]
+	}
 	appset.Status.Resources = statuses
 	// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
 	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
@@ -1467,7 +1467,7 @@ func syncApplication(application argov1alpha1.Application, prune bool) argov1alp
 	return application
 }
 
-func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
+func getApplicationOwnsHandler(enableProgressiveSyncs bool) predicate.Funcs {
 	return predicate.Funcs{
 		CreateFunc: func(e event.CreateEvent) bool {
 			// if we are the owner and there is a create event, we most likely created it and do not need to
@@ -1504,8 +1504,8 @@ func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
 			if !isApp {
 				return false
 			}
-			requeue := shouldRequeueApplicationSet(appOld, appNew, enableProgressiveSyncs)
-			logCtx.WithField("requeue", requeue).Debugf("requeue: %t caused by application %s", requeue, appNew.Name)
+			requeue := shouldRequeueForApplication(appOld, appNew, enableProgressiveSyncs)
+			logCtx.WithField("requeue", requeue).Debugf("requeue caused by application %s", appNew.Name)
 			return requeue
 		},
 		GenericFunc: func(e event.GenericEvent) bool {
@@ -1522,13 +1522,13 @@ func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
 	}
 }
 
-// shouldRequeueApplicationSet determines when we want to requeue an ApplicationSet for reconciling based on an owned
+// shouldRequeueForApplication determines when we want to requeue an ApplicationSet for reconciling based on an owned
 // application change
 // The applicationset controller owns a subset of the Application CR.
 // We do not need to re-reconcile if parts of the application change outside the applicationset's control.
 // An example being, Application.ApplicationStatus.ReconciledAt which gets updated by the application controller.
 // Additionally, Application.ObjectMeta.ResourceVersion and Application.ObjectMeta.Generation which are set by K8s.
-func shouldRequeueApplicationSet(appOld *argov1alpha1.Application, appNew *argov1alpha1.Application, enableProgressiveSyncs bool) bool {
+func shouldRequeueForApplication(appOld *argov1alpha1.Application, appNew *argov1alpha1.Application, enableProgressiveSyncs bool) bool {
 	if appOld == nil || appNew == nil {
 		return false
 	}
@@ -1538,9 +1538,9 @@ func shouldRequeueApplicationSet(appOld *argov1alpha1.Application, appNew *argov
 	// https://pkg.go.dev/reflect#DeepEqual
 	// ApplicationDestination has an unexported field so we can just use the == for comparison
 	if !cmp.Equal(appOld.Spec, appNew.Spec, cmpopts.EquateEmpty(), cmpopts.EquateComparable(argov1alpha1.ApplicationDestination{})) ||
-		!cmp.Equal(appOld.ObjectMeta.GetAnnotations(), appNew.ObjectMeta.GetAnnotations(), cmpopts.EquateEmpty()) ||
-		!cmp.Equal(appOld.ObjectMeta.GetLabels(), appNew.ObjectMeta.GetLabels(), cmpopts.EquateEmpty()) ||
-		!cmp.Equal(appOld.ObjectMeta.GetFinalizers(), appNew.ObjectMeta.GetFinalizers(), cmpopts.EquateEmpty()) {
+		!cmp.Equal(appOld.GetAnnotations(), appNew.GetAnnotations(), cmpopts.EquateEmpty()) ||
+		!cmp.Equal(appOld.GetLabels(), appNew.GetLabels(), cmpopts.EquateEmpty()) ||
+		!cmp.Equal(appOld.GetFinalizers(), appNew.GetFinalizers(), cmpopts.EquateEmpty()) {
 		return true
 	}
 
@@ -1561,4 +1561,90 @@ func shouldRequeueApplicationSet(appOld *argov1alpha1.Application, appNew *argov
 	return false
 }
 
+func getApplicationSetOwnsHandler(enableProgressiveSyncs bool) predicate.Funcs {
+	return predicate.Funcs{
+		CreateFunc: func(e event.CreateEvent) bool {
+			appSet, isApp := e.Object.(*argov1alpha1.ApplicationSet)
+			if !isApp {
+				return false
+			}
+			log.WithField("applicationset", appSet.QualifiedName()).Debugln("received create event")
+			// Always queue a new applicationset
+			return true
+		},
+		DeleteFunc: func(e event.DeleteEvent) bool {
+			appSet, isApp := e.Object.(*argov1alpha1.ApplicationSet)
+			if !isApp {
+				return false
+			}
+			log.WithField("applicationset", appSet.QualifiedName()).Debugln("received delete event")
+			// Always queue for the deletion of an applicationset
+			return true
+		},
+		UpdateFunc: func(e event.UpdateEvent) bool {
+			appSetOld, isAppSet := e.ObjectOld.(*argov1alpha1.ApplicationSet)
+			if !isAppSet {
+				return false
+			}
+			appSetNew, isAppSet := e.ObjectNew.(*argov1alpha1.ApplicationSet)
+			if !isAppSet {
+				return false
+			}
+			requeue := shouldRequeueForApplicationSet(appSetOld, appSetNew, enableProgressiveSyncs)
+			log.WithField("applicationset", appSetNew.QualifiedName()).
+				WithField("requeue", requeue).Debugln("received update event")
+			return requeue
+		},
+		GenericFunc: func(e event.GenericEvent) bool {
+			appSet, isApp := e.Object.(*argov1alpha1.ApplicationSet)
+			if !isApp {
+				return false
+			}
+			log.WithField("applicationset", appSet.QualifiedName()).Debugln("received generic event")
+			// Always queue for the generic of an applicationset
+			return true
+		},
+	}
+}
+
+// shouldRequeueForApplicationSet determines when we need to requeue an applicationset
+func shouldRequeueForApplicationSet(appSetOld, appSetNew *argov1alpha1.ApplicationSet, enableProgressiveSyncs bool) bool {
+	if appSetOld == nil || appSetNew == nil {
+		return false
+	}
+
+	// Requeue if any ApplicationStatus.Status changed for Progressive sync strategy
+	if enableProgressiveSyncs {
+		if !cmp.Equal(appSetOld.Status.ApplicationStatus, appSetNew.Status.ApplicationStatus, cmpopts.EquateEmpty()) {
+			return true
+		}
+	}
+
+	// only compare the applicationset spec, annotations, labels and finalizers, deletionTimestamp, specifically avoiding
+	// the status field. status is owned by the applicationset controller,
+	// and we do not need to requeue when it does bookkeeping
+	// NB: the ApplicationDestination comes from the ApplicationSpec being embedded
+	// in the ApplicationSetTemplate from the generators
+	if !cmp.Equal(appSetOld.Spec, appSetNew.Spec, cmpopts.EquateEmpty(), cmpopts.EquateComparable(argov1alpha1.ApplicationDestination{})) ||
+		!cmp.Equal(appSetOld.GetLabels(), appSetNew.GetLabels(), cmpopts.EquateEmpty()) ||
+		!cmp.Equal(appSetOld.GetFinalizers(), appSetNew.GetFinalizers(), cmpopts.EquateEmpty()) ||
+		!cmp.Equal(appSetOld.DeletionTimestamp, appSetNew.DeletionTimestamp, cmpopts.EquateEmpty()) {
+		return true
+	}
+
+	// Requeue only when the refresh annotation is newly added to the ApplicationSet.
+	// Changes to other annotations made simultaneously might be missed, but such cases are rare.
+	if !cmp.Equal(appSetOld.GetAnnotations(), appSetNew.GetAnnotations(), cmpopts.EquateEmpty()) {
+		_, oldHasRefreshAnnotation := appSetOld.Annotations[common.AnnotationApplicationSetRefresh]
+		_, newHasRefreshAnnotation := appSetNew.Annotations[common.AnnotationApplicationSetRefresh]
+
+		if oldHasRefreshAnnotation && !newHasRefreshAnnotation {
+			return false
+		}
+		return true
+	}
+
+	return false
+}
+
 var _ handler.EventHandler = &clusterSecretEventHandler{}
```
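`shouldRequeueForApplicationSet` treats annotation changes asymmetrically: adding the refresh annotation requeues, while the controller's own removal of it does not. An illustrative test sketch (not part of this diff; it assumes the same package as the function plus the repository's `common` and `v1alpha1` imports):

```go
package controllers

import (
	"testing"

	"github.com/stretchr/testify/assert"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/argoproj/argo-cd/v2/common"
	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func TestShouldRequeueForApplicationSetRefreshAnnotationSketch(t *testing.T) {
	base := &argov1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "argocd"},
	}

	refreshed := base.DeepCopy()
	refreshed.Annotations = map[string]string{common.AnnotationApplicationSetRefresh: "true"}

	// Annotation newly added -> requeue.
	assert.True(t, shouldRequeueForApplicationSet(base, refreshed, false))
	// Annotation removed (the controller's own bookkeeping) -> no requeue.
	assert.False(t, shouldRequeueForApplicationSet(refreshed, base, false))
}
```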
@@ -1885,7 +1885,7 @@ func TestRequeueGeneratorFails(t *testing.T) {
|
||||
}
|
||||
|
||||
res, err := r.Reconcile(context.Background(), req)
|
||||
require.Error(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, ReconcileRequeueOnValidationError, res.RequeueAfter)
|
||||
}
|
||||
|
||||
@@ -4733,6 +4733,9 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
|
||||
Health: v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusProgressing,
},
Sync: v1alpha1.SyncStatus{
Revision: "Next",
},
},
},
},
@@ -4796,7 +4799,8 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
Phase: common.OperationRunning,
},
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
Revision: "Current",
},
},
},
@@ -4861,7 +4865,8 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
Phase: common.OperationSucceeded,
},
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
Revision: "Next",
},
},
},
@@ -4926,7 +4931,8 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
Phase: common.OperationSucceeded,
},
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
Revision: "Current",
},
},
},
@@ -5165,86 +5171,6 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
},
},
},
{
name: "does not progress a pending application with a successful sync triggered by controller with invalid revision to progressing",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
LastTransitionTime: &metav1.Time{
Time: time.Now().Add(time.Duration(-1) * time.Minute),
},
Message: "",
Status: "Pending",
Step: "1",
TargetRevisions: []string{"Next"},
},
},
},
},
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Status: v1alpha1.ApplicationStatus{
Health: v1alpha1.HealthStatus{
Status: health.HealthStatusDegraded,
},
OperationState: &v1alpha1.OperationState{
Phase: common.OperationSucceeded,
StartedAt: metav1.Time{
Time: time.Now(),
},
Operation: v1alpha1.Operation{
InitiatedBy: v1alpha1.OperationInitiator{
Username: "applicationset-controller",
Automated: true,
},
},
SyncResult: &v1alpha1.SyncOperationResult{
Revision: "Previous",
},
},
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
},
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: "",
Status: "Pending",
Step: "1",
TargetRevisions: []string{"Next"},
},
},
},
{
name: "removes the appStatus for applications that no longer exist",
appSet: v1alpha1.ApplicationSet{
@@ -5299,7 +5225,77 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
Phase: common.OperationSucceeded,
},
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
Revision: "Current",
},
},
},
appStepMap: map[string]int{
"app1": 0,
},
expectedAppStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: "Application resource is already Healthy, updating status from Waiting to Healthy.",
Status: "Healthy",
Step: "1",
TargetRevisions: []string{"Current"},
},
},
},
{
name: "progresses a pending synced application with an old revision to progressing with the Current one",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
{
MatchExpressions: []v1alpha1.ApplicationMatchExpression{},
},
},
},
},
},
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Message: "",
Status: "Pending",
Step: "1",
TargetRevisions: []string{"Old"},
},
},
},
},
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Status: v1alpha1.ApplicationStatus{
Health: v1alpha1.HealthStatus{
Status: health.HealthStatusHealthy,
},
OperationState: &v1alpha1.OperationState{
Phase: common.OperationSucceeded,
SyncResult: &v1alpha1.SyncOperationResult{
Revision: "Current",
},
},
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
Revisions: []string{"Current"},
},
},
},
@@ -6101,10 +6097,11 @@ func TestUpdateResourceStatus(t *testing.T) {
require.NoError(t, err)

for _, cc := range []struct {
name string
appSet v1alpha1.ApplicationSet
apps []v1alpha1.Application
expectedResources []v1alpha1.ResourceStatus
maxResourcesStatusCount int
}{
{
name: "handles an empty application list",
@@ -6275,6 +6272,73 @@ func TestUpdateResourceStatus(t *testing.T) {
apps: []v1alpha1.Application{},
expectedResources: nil,
},
{
name: "truncates resources status list to",
appSet: v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Name: "name",
Namespace: "argocd",
},
Status: v1alpha1.ApplicationSetStatus{
Resources: []v1alpha1.ResourceStatus{
{
Name: "app1",
Status: v1alpha1.SyncStatusCodeOutOfSync,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusProgressing,
Message: "this is progressing",
},
},
{
Name: "app2",
Status: v1alpha1.SyncStatusCodeOutOfSync,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusProgressing,
Message: "this is progressing",
},
},
},
},
},
apps: []v1alpha1.Application{
{
ObjectMeta: metav1.ObjectMeta{
Name: "app1",
},
Status: v1alpha1.ApplicationStatus{
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
},
Health: v1alpha1.HealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "app2",
},
Status: v1alpha1.ApplicationStatus{
Sync: v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeSynced,
},
Health: v1alpha1.HealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
},
expectedResources: []v1alpha1.ResourceStatus{
{
Name: "app1",
Status: v1alpha1.SyncStatusCodeSynced,
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusHealthy,
},
},
},
maxResourcesStatusCount: 1,
},
} {
t.Run(cc.name, func(t *testing.T) {
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
@@ -6283,13 +6347,14 @@ func TestUpdateResourceStatus(t *testing.T) {
metrics := appsetmetrics.NewFakeAppsetMetrics(client)

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
Recorder: record.NewFakeRecorder(1),
Generators: map[string]generators.Generator{},
ArgoDB: &dbmocks.ArgoDB{},
KubeClientset: kubeclientset,
Metrics: metrics,
MaxResourcesStatusCount: cc.maxResourcesStatusCount,
}

err := r.updateResourcesStatus(context.TODO(), log.NewEntry(log.StandardLogger()), &cc.appSet, cc.apps)
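The new maxResourcesStatusCount case above exercises a cap on how many resource statuses are persisted in the ApplicationSet status. A minimal sketch of the truncation the test implies (hypothetical helper, assuming only the v1alpha1 types; not the reconciler's actual code):

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// truncateResourceStatuses keeps at most maxCount entries; a cap of 0 is
// treated as "no limit". This mirrors what the test expectation implies.
func truncateResourceStatuses(statuses []v1alpha1.ResourceStatus, maxCount int) []v1alpha1.ResourceStatus {
	if maxCount > 0 && len(statuses) > maxCount {
		return statuses[:maxCount]
	}
	return statuses
}

func main() {
	statuses := []v1alpha1.ResourceStatus{{Name: "app1"}, {Name: "app2"}}
	fmt.Println(len(truncateResourceStatuses(statuses, 1))) // prints 1, matching the expected single-entry result
}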
@@ -6397,11 +6462,11 @@ func TestResourceStatusAreOrdered(t *testing.T) {

func TestOwnsHandler(t *testing.T) {
// progressive syncs do not affect create, delete, or generic
ownsHandler := getOwnsHandlerPredicates(true)
ownsHandler := getApplicationOwnsHandler(true)
assert.False(t, ownsHandler.CreateFunc(event.CreateEvent{}))
assert.True(t, ownsHandler.DeleteFunc(event.DeleteEvent{}))
assert.True(t, ownsHandler.GenericFunc(event.GenericEvent{}))
ownsHandler = getOwnsHandlerPredicates(false)
ownsHandler = getApplicationOwnsHandler(false)
assert.False(t, ownsHandler.CreateFunc(event.CreateEvent{}))
assert.True(t, ownsHandler.DeleteFunc(event.DeleteEvent{}))
assert.True(t, ownsHandler.GenericFunc(event.GenericEvent{}))
@@ -6581,7 +6646,7 @@ func TestOwnsHandler(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ownsHandler = getOwnsHandlerPredicates(tt.args.enableProgressiveSyncs)
ownsHandler = getApplicationOwnsHandler(tt.args.enableProgressiveSyncs)
assert.Equalf(t, tt.want, ownsHandler.UpdateFunc(tt.args.e), "UpdateFunc(%v)", tt.args.e)
})
}
@@ -6657,3 +6722,497 @@ func TestMigrateStatus(t *testing.T) {
})
}
}

func TestApplicationSetOwnsHandlerUpdate(t *testing.T) {
buildAppSet := func(annotations map[string]string) *v1alpha1.ApplicationSet {
return &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Annotations: annotations,
},
}
}

tests := []struct {
name string
appSetOld crtclient.Object
appSetNew crtclient.Object
enableProgressiveSyncs bool
want bool
}{
{
name: "Different Spec",
appSetOld: &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{}},
},
},
},
appSetNew: &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{Git: &v1alpha1.GitGenerator{}},
},
},
},
enableProgressiveSyncs: false,
want: true,
},
{
name: "Different Annotations",
appSetOld: buildAppSet(map[string]string{"key1": "value1"}),
appSetNew: buildAppSet(map[string]string{"key1": "value2"}),
enableProgressiveSyncs: false,
want: true,
},
{
name: "Different Labels",
appSetOld: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"key1": "value1"},
},
},
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"key1": "value2"},
},
},
enableProgressiveSyncs: false,
want: true,
},
{
name: "Different Finalizers",
appSetOld: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Finalizers: []string{"finalizer1"},
},
},
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Finalizers: []string{"finalizer2"},
},
},
enableProgressiveSyncs: false,
want: true,
},
{
name: "No Changes",
appSetOld: &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{}},
},
},
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{"key1": "value1"},
Labels: map[string]string{"key1": "value1"},
Finalizers: []string{"finalizer1"},
},
},
appSetNew: &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Generators: []v1alpha1.ApplicationSetGenerator{
{List: &v1alpha1.ListGenerator{}},
},
},
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{"key1": "value1"},
Labels: map[string]string{"key1": "value1"},
Finalizers: []string{"finalizer1"},
},
},
enableProgressiveSyncs: false,
want: false,
},
{
name: "annotation removed",
appSetOld: buildAppSet(map[string]string{
argocommon.AnnotationApplicationSetRefresh: "true",
}),
appSetNew: buildAppSet(map[string]string{}),
enableProgressiveSyncs: false,
want: false,
},
{
name: "annotation not removed",
appSetOld: buildAppSet(map[string]string{
argocommon.AnnotationApplicationSetRefresh: "true",
}),
appSetNew: buildAppSet(map[string]string{
argocommon.AnnotationApplicationSetRefresh: "true",
}),
enableProgressiveSyncs: false,
want: false,
},
{
name: "annotation added",
appSetOld: buildAppSet(map[string]string{}),
appSetNew: buildAppSet(map[string]string{
argocommon.AnnotationApplicationSetRefresh: "true",
}),
enableProgressiveSyncs: false,
want: true,
},
{
name: "old object is not an appset",
appSetOld: &v1alpha1.Application{},
appSetNew: buildAppSet(map[string]string{}),
enableProgressiveSyncs: false,
want: false,
},
{
name: "new object is not an appset",
appSetOld: buildAppSet(map[string]string{}),
appSetNew: &v1alpha1.Application{},
enableProgressiveSyncs: false,
want: false,
},
{
name: "deletionTimestamp present when progressive sync enabled",
appSetOld: buildAppSet(map[string]string{}),
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
},
enableProgressiveSyncs: true,
want: true,
},
{
name: "deletionTimestamp present when progressive sync disabled",
appSetOld: buildAppSet(map[string]string{}),
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
},
enableProgressiveSyncs: false,
want: true,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ownsHandler := getApplicationSetOwnsHandler(tt.enableProgressiveSyncs)
requeue := ownsHandler.UpdateFunc(event.UpdateEvent{
ObjectOld: tt.appSetOld,
ObjectNew: tt.appSetNew,
})
assert.Equalf(t, tt.want, requeue, "ownsHandler.UpdateFunc(%v, %v, %t)", tt.appSetOld, tt.appSetNew, tt.enableProgressiveSyncs)
})
}
}
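For readers unfamiliar with the owns-handler pattern these tests cover: a controller-runtime predicate filters watch events before they requeue the owning object. A generic, self-contained sketch of the shape (not Argo CD's actual handler, which additionally inspects annotations, labels, finalizers, and progressive-sync status):

package main

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// newExamplePredicate requeues on create/delete, ignores generic events, and
// only requeues updates when the object's generation (i.e. its spec) changed.
func newExamplePredicate() predicate.Funcs {
	return predicate.Funcs{
		CreateFunc: func(event.CreateEvent) bool { return true },
		DeleteFunc: func(event.DeleteEvent) bool { return true },
		UpdateFunc: func(e event.UpdateEvent) bool {
			return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
		},
		GenericFunc: func(event.GenericEvent) bool { return false },
	}
}

func main() { _ = newExamplePredicate() }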
func TestApplicationSetOwnsHandlerGeneric(t *testing.T) {
ownsHandler := getApplicationSetOwnsHandler(false)
tests := []struct {
name string
obj crtclient.Object
want bool
}{
{
name: "Object is ApplicationSet",
obj: &v1alpha1.ApplicationSet{},
want: true,
},
{
name: "Object is not ApplicationSet",
obj: &v1alpha1.Application{},
want: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
requeue := ownsHandler.GenericFunc(event.GenericEvent{
Object: tt.obj,
})
assert.Equalf(t, tt.want, requeue, "ownsHandler.GenericFunc(%v)", tt.obj)
})
}
}

func TestApplicationSetOwnsHandlerCreate(t *testing.T) {
ownsHandler := getApplicationSetOwnsHandler(false)
tests := []struct {
name string
obj crtclient.Object
want bool
}{
{
name: "Object is ApplicationSet",
obj: &v1alpha1.ApplicationSet{},
want: true,
},
{
name: "Object is not ApplicationSet",
obj: &v1alpha1.Application{},
want: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
requeue := ownsHandler.CreateFunc(event.CreateEvent{
Object: tt.obj,
})
assert.Equalf(t, tt.want, requeue, "ownsHandler.CreateFunc(%v)", tt.obj)
})
}
}

func TestApplicationSetOwnsHandlerDelete(t *testing.T) {
ownsHandler := getApplicationSetOwnsHandler(false)
tests := []struct {
name string
obj crtclient.Object
want bool
}{
{
name: "Object is ApplicationSet",
obj: &v1alpha1.ApplicationSet{},
want: true,
},
{
name: "Object is not ApplicationSet",
obj: &v1alpha1.Application{},
want: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
requeue := ownsHandler.DeleteFunc(event.DeleteEvent{
Object: tt.obj,
})
assert.Equalf(t, tt.want, requeue, "ownsHandler.DeleteFunc(%v)", tt.obj)
})
}
}

func TestShouldRequeueForApplicationSet(t *testing.T) {
type args struct {
appSetOld *v1alpha1.ApplicationSet
appSetNew *v1alpha1.ApplicationSet
enableProgressiveSyncs bool
}
tests := []struct {
name string
args args
want bool
}{
{
name: "NilAppSet",
args: args{
appSetNew: &v1alpha1.ApplicationSet{},
appSetOld: nil,
enableProgressiveSyncs: false,
},
want: false,
},
{
name: "ApplicationSetApplicationStatusChanged",
args: args{
appSetOld: &v1alpha1.ApplicationSet{
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Status: "Healthy",
},
},
},
},
appSetNew: &v1alpha1.ApplicationSet{
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Status: "Waiting",
},
},
},
},
enableProgressiveSyncs: true,
},
want: true,
},
{
name: "ApplicationSetWithDeletionTimestamp",
args: args{
appSetOld: &v1alpha1.ApplicationSet{
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Status: "Healthy",
},
},
},
},
appSetNew: &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &metav1.Time{Time: time.Now()},
},
Status: v1alpha1.ApplicationSetStatus{
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
{
Application: "app1",
Status: "Waiting",
},
},
},
},
enableProgressiveSyncs: false,
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equalf(t, tt.want, shouldRequeueForApplicationSet(tt.args.appSetOld, tt.args.appSetNew, tt.args.enableProgressiveSyncs), "shouldRequeueForApplicationSet(%v, %v)", tt.args.appSetOld, tt.args.appSetNew)
})
}
}
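A sketch consistent with the three cases above (illustrative only, not the function's real body): nil inputs never requeue, a deletion timestamp on the new object always does, and per-application status drift only matters once progressive syncs are enabled.

package main

import (
	"reflect"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func sketchShouldRequeue(appSetOld, appSetNew *v1alpha1.ApplicationSet, progressiveSyncs bool) bool {
	if appSetOld == nil || appSetNew == nil {
		return false // "NilAppSet" case
	}
	if appSetNew.DeletionTimestamp != nil {
		return true // "ApplicationSetWithDeletionTimestamp" case
	}
	// "ApplicationSetApplicationStatusChanged" case
	return progressiveSyncs && !reflect.DeepEqual(appSetOld.Status.ApplicationStatus, appSetNew.Status.ApplicationStatus)
}

func main() { _ = sketchShouldRequeue(nil, &v1alpha1.ApplicationSet{}, false) }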
func TestIgnoreNotAllowedNamespaces(t *testing.T) {
tests := []struct {
name string
namespaces []string
objectNS string
expected bool
}{
{
name: "Namespace allowed",
namespaces: []string{"allowed-namespace"},
objectNS: "allowed-namespace",
expected: true,
},
{
name: "Namespace not allowed",
namespaces: []string{"allowed-namespace"},
objectNS: "not-allowed-namespace",
expected: false,
},
{
name: "Empty allowed namespaces",
namespaces: []string{},
objectNS: "any-namespace",
expected: false,
},
{
name: "Multiple allowed namespaces",
namespaces: []string{"allowed-namespace-1", "allowed-namespace-2"},
objectNS: "allowed-namespace-2",
expected: true,
},
{
name: "Namespace not in multiple allowed namespaces",
namespaces: []string{"allowed-namespace-1", "allowed-namespace-2"},
objectNS: "not-allowed-namespace",
expected: false,
},
{
name: "Namespace matched by glob pattern",
namespaces: []string{"allowed-namespace-*"},
objectNS: "allowed-namespace-1",
expected: true,
},
{
name: "Namespace matched by regex pattern",
namespaces: []string{"/^allowed-namespace-[^-]+$/"},
objectNS: "allowed-namespace-1",
expected: true,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
predicate := ignoreNotAllowedNamespaces(tt.namespaces)
object := &v1alpha1.ApplicationSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: tt.objectNS,
},
}

t.Run(tt.name+":Create", func(t *testing.T) {
result := predicate.Create(event.CreateEvent{Object: object})
assert.Equal(t, tt.expected, result)
})

t.Run(tt.name+":Update", func(t *testing.T) {
result := predicate.Update(event.UpdateEvent{ObjectNew: object})
assert.Equal(t, tt.expected, result)
})

t.Run(tt.name+":Delete", func(t *testing.T) {
result := predicate.Delete(event.DeleteEvent{Object: object})
assert.Equal(t, tt.expected, result)
})

t.Run(tt.name+":Generic", func(t *testing.T) {
result := predicate.Generic(event.GenericEvent{Object: object})
assert.Equal(t, tt.expected, result)
})
})
}
}
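The glob and regex cases above suggest the following matching shape. This is a hypothetical, self-contained illustration (the controller uses its own glob utility, not this code), with slash-wrapped patterns treated as regular expressions and everything else matched as a glob:

package main

import (
	"fmt"
	"path"
	"regexp"
	"strings"
)

// namespaceAllowed reports whether ns matches any allowed pattern.
func namespaceAllowed(allowed []string, ns string) bool {
	for _, pattern := range allowed {
		if len(pattern) > 2 && strings.HasPrefix(pattern, "/") && strings.HasSuffix(pattern, "/") {
			// "/.../" patterns are regular expressions.
			if re, err := regexp.Compile(pattern[1 : len(pattern)-1]); err == nil && re.MatchString(ns) {
				return true
			}
			continue
		}
		// Plain names and globs such as "allowed-namespace-*".
		if ok, err := path.Match(pattern, ns); err == nil && ok {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(namespaceAllowed([]string{"allowed-namespace-*"}, "allowed-namespace-1"))          // true
	fmt.Println(namespaceAllowed([]string{"/^allowed-namespace-[^-]+$/"}, "allowed-namespace-1")) // true
}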
func TestIsRollingSyncStrategy(t *testing.T) {
tests := []struct {
name string
appset *v1alpha1.ApplicationSet
expected bool
}{
{
name: "RollingSync strategy is explicitly set",
appset: &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "RollingSync",
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
Steps: []v1alpha1.ApplicationSetRolloutStep{},
},
},
},
},
expected: true,
},
{
name: "AllAtOnce strategy is explicitly set",
appset: &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{
Type: "AllAtOnce",
},
},
},
expected: false,
},
{
name: "Strategy is empty",
appset: &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Strategy: &v1alpha1.ApplicationSetStrategy{},
},
},
expected: false,
},
{
name: "Strategy is nil",
appset: &v1alpha1.ApplicationSet{
Spec: v1alpha1.ApplicationSetSpec{
Strategy: nil,
},
},
expected: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := isRollingSyncStrategy(tt.appset)
assert.Equal(t, tt.expected, result)
})
}
}
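A reading of isRollingSyncStrategy that is consistent with all four cases above (a minimal sketch under the assumption that only the strategy type matters; the real function may check more):

package main

import "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

// isRollingSyncStrategySketch returns true only when a strategy is configured
// and its type is explicitly "RollingSync"; nil or empty strategies are not.
func isRollingSyncStrategySketch(appset *v1alpha1.ApplicationSet) bool {
	return appset.Spec.Strategy != nil && appset.Spec.Strategy.Type == "RollingSync"
}

func main() {
	_ = isRollingSyncStrategySketch(&v1alpha1.ApplicationSet{})
}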
@@ -28,10 +28,11 @@ type GitGenerator struct {
namespace string
}

func NewGitGenerator(repos services.Repos, namespace string) Generator {
// NewGitGenerator creates a new instance of Git Generator
func NewGitGenerator(repos services.Repos, controllerNamespace string) Generator {
g := &GitGenerator{
repos: repos,
namespace: namespace,
namespace: controllerNamespace,
}

return g
@@ -70,11 +71,11 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
if !strings.Contains(appSet.Spec.Template.Spec.Project, "{{") {
project := appSet.Spec.Template.Spec.Project
appProject := &argoprojiov1alpha1.AppProject{}
namespace := g.namespace
if namespace == "" {
namespace = appSet.Namespace
controllerNamespace := g.namespace
if controllerNamespace == "" {
controllerNamespace = appSet.Namespace
}
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: namespace}, appProject); err != nil {
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: controllerNamespace}, appProject); err != nil {
return nil, fmt.Errorf("error getting project %s: %w", project, err)
}
// we need to verify the signature on the Git revision if GPG is enabled

applicationset/generators/mocks/Generator.go (generated, 2 changes)
@@ -1,4 +1,4 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

@@ -10,15 +10,15 @@ import (
"github.com/argoproj/argo-cd/v2/applicationset/services"
)

func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, namespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
terminalGenerators := map[string]Generator{
"List": NewListGenerator(),
"Clusters": NewClusterGenerator(c, ctx, k8sClient, namespace),
"Git": NewGitGenerator(argoCDService, namespace),
"Clusters": NewClusterGenerator(c, ctx, k8sClient, controllerNamespace),
"Git": NewGitGenerator(argoCDService, controllerNamespace),
"SCMProvider": NewSCMProviderGenerator(c, scmConfig),
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace),
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace),
"PullRequest": NewPullRequestGenerator(c, scmConfig),
"Plugin": NewPluginGenerator(c, ctx, k8sClient, namespace),
"Plugin": NewPluginGenerator(c, ctx, k8sClient, controllerNamespace),
}

nestedGenerators := map[string]Generator{

@@ -5,7 +5,7 @@ import (
"net/http"

"github.com/bradleyfalzon/ghinstallation/v2"
"github.com/google/go-github/v63/github"
"github.com/google/go-github/v66/github"

"github.com/argoproj/argo-cd/v2/applicationset/services/github_app_auth"
)

applicationset/services/mocks/Repos.go (generated, 2 changes)
@@ -1,4 +1,4 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

@@ -5,7 +5,7 @@ import (
"fmt"
"os"

"github.com/google/go-github/v63/github"
"github.com/google/go-github/v66/github"
"golang.org/x/oauth2"
)

@@ -3,7 +3,7 @@ package pull_request
import (
"testing"

"github.com/google/go-github/v63/github"
"github.com/google/go-github/v66/github"
"github.com/stretchr/testify/require"
)

@@ -1,4 +1,4 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

@@ -6,7 +6,7 @@ import (
"net/http"
"os"

"github.com/google/go-github/v63/github"
"github.com/google/go-github/v66/github"
"golang.org/x/oauth2"
)

@@ -2,10 +2,10 @@ package scm_provider

import (
"context"
"errors"
"fmt"
"net/http"
"os"
pathpkg "path"

"github.com/hashicorp/go-retryablehttp"
"github.com/xanzy/go-gitlab"
@@ -129,40 +129,31 @@ func (g *GitlabProvider) ListRepos(ctx context.Context, cloneProtocol string) ([
func (g *GitlabProvider) RepoHasPath(_ context.Context, repo *Repository, path string) (bool, error) {
p, _, err := g.client.Projects.GetProject(repo.Organization+"/"+repo.Repository, nil)
if err != nil {
return false, err
return false, fmt.Errorf("error getting Project Info: %w", err)
}
directories := []string{
path,
pathpkg.Dir(path),
}
for _, directory := range directories {
options := gitlab.ListTreeOptions{
Path: &directory,
Ref: &repo.Branch,
}
for {
treeNode, resp, err := g.client.Repositories.ListTree(p.ID, &options)

// search if the path is a file and exists in the repo
fileOptions := gitlab.GetFileOptions{Ref: &repo.Branch}
_, _, err = g.client.RepositoryFiles.GetFile(p.ID, path, &fileOptions)
if err != nil {
if errors.Is(err, gitlab.ErrNotFound) {
// no file found, check for a directory
options := gitlab.ListTreeOptions{
Path: &path,
Ref: &repo.Branch,
}
_, _, err := g.client.Repositories.ListTree(p.ID, &options)
if err != nil {
if errors.Is(err, gitlab.ErrNotFound) {
return false, nil // no file or directory found
}
return false, err
}
if path == directory {
if resp.TotalItems > 0 {
return true, nil
}
}
for i := range treeNode {
if treeNode[i].Path == path {
return true, nil
}
}
if resp.NextPage == 0 {
// no further pages
break
}
options.Page = resp.NextPage
return true, nil // directory found
}
return false, err
}
return false, nil
return true, nil // file found
}

func (g *GitlabProvider) listBranches(_ context.Context, repo *Repository) ([]gitlab.Branch, error) {
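Because the removed loop and the new probe are interleaved in the hunk above, here is the refactor distilled into a standalone sketch (assuming a configured *gitlab.Client and a project ID; names are hypothetical): try the path as a file first, and only on gitlab.ErrNotFound fall back to listing it as a directory.

package main

import (
	"errors"

	gitlab "github.com/xanzy/go-gitlab"
)

// pathExists probes a path as a file, then as a directory, treating 404s from
// either call as "not found" rather than as hard errors.
func pathExists(client *gitlab.Client, pid interface{}, ref, path string) (bool, error) {
	_, _, err := client.RepositoryFiles.GetFile(pid, path, &gitlab.GetFileOptions{Ref: &ref})
	if err == nil {
		return true, nil // file found
	}
	if !errors.Is(err, gitlab.ErrNotFound) {
		return false, err
	}
	if _, _, err := client.Repositories.ListTree(pid, &gitlab.ListTreeOptions{Path: &path, Ref: &ref}); err != nil {
		if errors.Is(err, gitlab.ErrNotFound) {
			return false, nil // neither file nor directory
		}
		return false, err
	}
	return true, nil // directory found
}

func main() {}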
@@ -20,6 +20,7 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
t.Helper()
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
fmt.Println(r.RequestURI)
switch r.RequestURI {
case "/api/v4":
fmt.Println("here1")
@@ -1040,6 +1041,32 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
if err != nil {
t.Fail()
}
// Recent versions of the Gitlab API (v17.7+) listTree return 404 not only when a file doesn't exist, but also
// when a path is to a file instead of a directory. Code was refactored to explicitly search for file then
// search for directory, catching 404 errors as "file not found".
case "/api/v4/projects/27084533/repository/files/argocd?ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/files/argocd%2Finstall%2Eyaml?ref=master":
_, err := io.WriteString(w, `{"file_name":"install.yaml","file_path":"argocd/install.yaml","size":0,"encoding":"base64","content_sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","ref":"main","blob_id":"e69de29bb2d1d6434b8b29ae775ad8c2e48c5391","commit_id":"6d4c0f9d34534ccc73aa3f3180b25e2aebe630eb","last_commit_id":"b50eb63f9c0e09bfdb070db26fd32c7210291f52","execute_filemode":false,"content":""}`)
if err != nil {
t.Fail()
}
case "/api/v4/projects/27084533/repository/files/notathing?ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/tree?path=notathing&ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/files/argocd%2Fnotathing%2Eyaml?ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/tree?path=argocd%2Fnotathing.yaml&ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/files/notathing%2Fnotathing%2Eyaml?ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/tree?path=notathing%2Fnotathing.yaml&ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/files/notathing%2Fnotathing%2Fnotathing%2Eyaml?ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/tree?path=notathing%2Fnotathing%2Fnotathing.yaml&ref=master":
w.WriteHeader(http.StatusNotFound)
case "/api/v4/projects/27084533/repository/branches/foo":
w.WriteHeader(http.StatusNotFound)
default:
@@ -1194,6 +1221,16 @@ func TestGitlabHasPath(t *testing.T) {
path: "argocd/notathing.yaml",
exists: false,
},
{
name: "nonexistent file in nonexistent directory",
path: "notathing/notathing.yaml",
exists: false,
},
{
name: "nonexistent file in nested nonexistent directory",
path: "notathing/notathing/notathing.yaml",
exists: false,
},
}

for _, c := range cases {

@@ -2,22 +2,15 @@ package utils

import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"sync"
"time"

log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/argoproj/argo-cd/v2/common"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/util/db"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/utils/ptr"
)

// The contents of this file are from
@@ -126,11 +119,15 @@ func ListClusters(ctx context.Context, clientset kubernetes.Interface, namespace
hasInClusterCredentials := false
for i, clusterSecret := range clusterSecrets {
// This line has changed from the original Argo CD code: now receives an error, and handles it
cluster, err := secretToCluster(&clusterSecret)
cluster, err := db.SecretToCluster(&clusterSecret)
if err != nil || cluster == nil {
return nil, fmt.Errorf("unable to convert cluster secret to cluster object '%s': %w", clusterSecret.Name, err)
}

// db.SecretToCluster populates these, but they're not meant to be available to the caller.
cluster.Labels = nil
cluster.Annotations = nil

clusterList.Items[i] = *cluster
if cluster.Server == appv1.KubernetesInternalAPIServerAddr {
hasInClusterCredentials = true
@@ -167,48 +164,3 @@ func getLocalCluster(clientset kubernetes.Interface) *appv1.Cluster {
cluster.ConnectionState.ModifiedAt = &now
return cluster
}

// secretToCluster converts a secret into a Cluster object
func secretToCluster(s *corev1.Secret) (*appv1.Cluster, error) {
var config appv1.ClusterConfig
if len(s.Data["config"]) > 0 {
if err := json.Unmarshal(s.Data["config"], &config); err != nil {
// This line has changed from the original Argo CD: now returns an error rather than panicking.
return nil, err
}
}

var namespaces []string
for _, ns := range strings.Split(string(s.Data["namespaces"]), ",") {
if ns = strings.TrimSpace(ns); ns != "" {
namespaces = append(namespaces, ns)
}
}
var refreshRequestedAt *metav1.Time
if v, found := s.Annotations[appv1.AnnotationKeyRefresh]; found {
requestedAt, err := time.Parse(time.RFC3339, v)
if err != nil {
log.Warnf("Error while parsing date in cluster secret '%s': %v", s.Name, err)
} else {
refreshRequestedAt = &metav1.Time{Time: requestedAt}
}
}
var shard *int64
if shardStr := s.Data["shard"]; shardStr != nil {
if val, err := strconv.Atoi(string(shardStr)); err != nil {
log.Warnf("Error while parsing shard in cluster secret '%s': %v", s.Name, err)
} else {
shard = ptr.To(int64(val))
}
}
cluster := appv1.Cluster{
ID: string(s.UID),
Server: strings.TrimRight(string(s.Data["server"]), "/"),
Name: string(s.Data["name"]),
Namespaces: namespaces,
Config: config,
RefreshRequestedAt: refreshRequestedAt,
Shard: shard,
}
return &cluster, nil
}

@@ -20,51 +20,6 @@ const (
fakeNamespace = "fake-ns"
)

// From Argo CD util/db/cluster_test.go
func Test_secretToCluster(t *testing.T) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "mycluster",
Namespace: fakeNamespace,
},
Data: map[string][]byte{
"name": []byte("test"),
"server": []byte("http://mycluster"),
"config": []byte("{\"username\":\"foo\", \"disableCompression\":true}"),
},
}
cluster, err := secretToCluster(secret)
require.NoError(t, err)
assert.Equal(t, argoappv1.Cluster{
Name: "test",
Server: "http://mycluster",
Config: argoappv1.ClusterConfig{
Username: "foo",
DisableCompression: true,
},
}, *cluster)
}

// From Argo CD util/db/cluster_test.go
func Test_secretToCluster_NoConfig(t *testing.T) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "mycluster",
Namespace: fakeNamespace,
},
Data: map[string][]byte{
"name": []byte("test"),
"server": []byte("http://mycluster"),
},
}
cluster, err := secretToCluster(secret)
require.NoError(t, err)
assert.Equal(t, argoappv1.Cluster{
Name: "test",
Server: "http://mycluster",
}, *cluster)
}

func createClusterSecret(secretName string, clusterName string, clusterServer string) *corev1.Secret {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{

applicationset/utils/mocks/Renderer.go (generated, 2 changes)
@@ -1,4 +1,4 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

@@ -10,6 +10,7 @@ p, role:readonly, applications, get, */*, allow
p, role:readonly, certificates, get, *, allow
p, role:readonly, clusters, get, *, allow
p, role:readonly, repositories, get, *, allow
p, role:readonly, write-repositories, get, *, allow
p, role:readonly, projects, get, *, allow
p, role:readonly, accounts, get, *, allow
p, role:readonly, gpgkeys, get, *, allow
@@ -17,7 +18,9 @@ p, role:readonly, logs, get, */*, allow

p, role:admin, applications, create, */*, allow
p, role:admin, applications, update, */*, allow
p, role:admin, applications, update/*, */*, allow
p, role:admin, applications, delete, */*, allow
p, role:admin, applications, delete/*, */*, allow
p, role:admin, applications, sync, */*, allow
p, role:admin, applications, override, */*, allow
p, role:admin, applications, action/*, */*, allow
@@ -34,6 +37,9 @@ p, role:admin, clusters, delete, *, allow
p, role:admin, repositories, create, *, allow
p, role:admin, repositories, update, *, allow
p, role:admin, repositories, delete, *, allow
p, role:admin, write-repositories, create, *, allow
p, role:admin, write-repositories, update, *, allow
p, role:admin, write-repositories, delete, *, allow
p, role:admin, projects, create, *, allow
p, role:admin, projects, update, *, allow
p, role:admin, projects, delete, *, allow
@@ -43,4 +49,4 @@ p, role:admin, gpgkeys, delete, *, allow
p, role:admin, exec, create, */*, allow

g, role:admin, role:readonly
g, admin, role:admin
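The new write-repositories resource slots into custom RBAC policies the same way repositories does. For instance, a hypothetical role for a CI bot that may manage write credentials but nothing else could be granted like this (illustrative only, not part of the built-in policy):

p, role:write-repo-admin, write-repositories, create, *, allow
p, role:write-repo-admin, write-repositories, update, *, allow
p, role:write-repo-admin, write-repositories, delete, *, allow
g, ci-bot, role:write-repo-admin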
assets/swagger.json (generated, 534 changes)
@@ -1990,6 +1990,39 @@
}
}
},
"/api/v1/applicationsets/generate": {
"post": {
"tags": [
"ApplicationSetService"
],
"summary": "Generate generates",
"operationId": "ApplicationSetService_Generate",
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/applicationsetApplicationSetGenerateRequest"
}
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/applicationsetApplicationSetGenerateResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/applicationsets/{name}": {
"get": {
"tags": [
@@ -4084,6 +4117,504 @@
}
}
},
"/api/v1/write-repocreds": {
"get": {
"tags": [
"RepoCredsService"
],
"summary": "ListWriteRepositoryCredentials gets a list of all configured repository credential sets that have write access",
"operationId": "RepoCredsService_ListWriteRepositoryCredentials",
"parameters": [
{
"type": "string",
"description": "Repo URL for query.",
"name": "url",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1alpha1RepoCredsList"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
},
"post": {
"tags": [
"RepoCredsService"
],
"summary": "CreateWriteRepositoryCredentials creates a new repository credential set with write access",
"operationId": "RepoCredsService_CreateWriteRepositoryCredentials",
"parameters": [
{
"description": "Repository definition",
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/v1alpha1RepoCreds"
}
},
{
"type": "boolean",
"description": "Whether to create in upsert mode.",
"name": "upsert",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1alpha1RepoCreds"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/write-repocreds/{creds.url}": {
"put": {
"tags": [
"RepoCredsService"
],
"summary": "UpdateWriteRepositoryCredentials updates a repository credential set with write access",
"operationId": "RepoCredsService_UpdateWriteRepositoryCredentials",
"parameters": [
{
"type": "string",
"description": "URL is the URL to which these credentials match",
"name": "creds.url",
"in": "path",
"required": true
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/v1alpha1RepoCreds"
}
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1alpha1RepoCreds"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/write-repocreds/{url}": {
"delete": {
"tags": [
"RepoCredsService"
],
"summary": "DeleteWriteRepositoryCredentials deletes a repository credential set with write access from the configuration",
"operationId": "RepoCredsService_DeleteWriteRepositoryCredentials",
"parameters": [
{
"type": "string",
"name": "url",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/repocredsRepoCredsResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/write-repositories": {
"get": {
"tags": [
"RepositoryService"
],
"summary": "ListWriteRepositories gets a list of all configured write repositories",
"operationId": "RepositoryService_ListWriteRepositories",
"parameters": [
{
"type": "string",
"description": "Repo URL for query.",
"name": "repo",
"in": "query"
},
{
"type": "boolean",
"description": "Whether to force a cache refresh on repo's connection state.",
"name": "forceRefresh",
"in": "query"
},
{
"type": "string",
"description": "App project for query.",
"name": "appProject",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1alpha1RepositoryList"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
},
"post": {
"tags": [
"RepositoryService"
],
"summary": "CreateWriteRepository creates a new write repository configuration",
"operationId": "RepositoryService_CreateWriteRepository",
"parameters": [
{
"description": "Repository definition",
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/v1alpha1Repository"
}
},
{
"type": "boolean",
"description": "Whether to create in upsert mode.",
"name": "upsert",
"in": "query"
},
{
"type": "boolean",
"description": "Whether to operate on credential set instead of repository.",
"name": "credsOnly",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1alpha1Repository"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/write-repositories/{repo.repo}": {
"put": {
"tags": [
"RepositoryService"
],
"summary": "UpdateWriteRepository updates a write repository configuration",
"operationId": "RepositoryService_UpdateWriteRepository",
"parameters": [
{
"type": "string",
"description": "Repo contains the URL to the remote repository",
"name": "repo.repo",
"in": "path",
"required": true
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/v1alpha1Repository"
}
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1alpha1Repository"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/write-repositories/{repo}": {
"get": {
"tags": [
"RepositoryService"
],
"summary": "GetWrite returns a repository or its write credentials",
"operationId": "RepositoryService_GetWrite",
"parameters": [
{
"type": "string",
"description": "Repo URL for query",
"name": "repo",
"in": "path",
"required": true
},
{
"type": "boolean",
"description": "Whether to force a cache refresh on repo's connection state.",
"name": "forceRefresh",
"in": "query"
},
{
"type": "string",
"description": "App project for query.",
"name": "appProject",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1alpha1Repository"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
},
"delete": {
"tags": [
"RepositoryService"
],
"summary": "DeleteWriteRepository deletes a write repository from the configuration",
"operationId": "RepositoryService_DeleteWriteRepository",
"parameters": [
{
"type": "string",
"description": "Repo URL for query",
"name": "repo",
"in": "path",
"required": true
},
{
"type": "boolean",
"description": "Whether to force a cache refresh on repo's connection state.",
"name": "forceRefresh",
"in": "query"
},
{
"type": "string",
"description": "App project for query.",
"name": "appProject",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/repositoryRepoResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/v1/write-repositories/{repo}/validate": {
"post": {
"tags": [
"RepositoryService"
],
"summary": "ValidateWriteAccess validates write access to a repository with given parameters",
"operationId": "RepositoryService_ValidateWriteAccess",
"parameters": [
{
"type": "string",
"description": "The URL to the repo",
"name": "repo",
"in": "path",
"required": true
},
{
"description": "The URL to the repo",
"name": "body",
"in": "body",
"required": true,
"schema": {
"type": "string"
}
},
{
"type": "string",
"description": "Username for accessing repo.",
"name": "username",
"in": "query"
},
{
"type": "string",
"description": "Password for accessing repo.",
"name": "password",
"in": "query"
},
{
"type": "string",
"description": "Private key data for accessing SSH repository.",
"name": "sshPrivateKey",
"in": "query"
},
{
"type": "boolean",
"description": "Whether to skip certificate or host key validation.",
"name": "insecure",
"in": "query"
},
{
"type": "string",
"description": "TLS client cert data for accessing HTTPS repository.",
"name": "tlsClientCertData",
"in": "query"
},
{
"type": "string",
"description": "TLS client cert key for accessing HTTPS repository.",
"name": "tlsClientCertKey",
"in": "query"
},
{
"type": "string",
"description": "The type of the repo.",
"name": "type",
"in": "query"
},
{
"type": "string",
"description": "The name of the repo.",
"name": "name",
"in": "query"
},
{
"type": "boolean",
"description": "Whether helm-oci support should be enabled for this repo.",
"name": "enableOci",
"in": "query"
},
{
"type": "string",
"description": "Github App Private Key PEM data.",
"name": "githubAppPrivateKey",
"in": "query"
},
{
"type": "string",
"format": "int64",
"description": "Github App ID of the app used to access the repo.",
"name": "githubAppID",
"in": "query"
},
{
"type": "string",
"format": "int64",
"description": "Github App Installation ID of the installed GitHub App.",
"name": "githubAppInstallationID",
"in": "query"
},
{
"type": "string",
"description": "Github App Enterprise base url if empty will default to https://api.github.com.",
"name": "githubAppEnterpriseBaseUrl",
"in": "query"
},
{
"type": "string",
"description": "HTTP/HTTPS proxy to access the repository.",
"name": "proxy",
"in": "query"
},
{
"type": "string",
"description": "Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity.",
"name": "project",
"in": "query"
},
{
"type": "string",
"description": "Google Cloud Platform service account key.",
"name": "gcpServiceAccountKey",
"in": "query"
},
{
"type": "boolean",
"description": "Whether to force HTTP basic auth.",
"name": "forceHttpBasicAuth",
"in": "query"
}
],
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/repositoryRepoResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
}
}
},
"/api/version": {
"get": {
"tags": [
|
||||
@@ -4725,6 +5256,9 @@
|
||||
"help": {
|
||||
"$ref": "#/definitions/clusterHelp"
|
||||
},
|
||||
"hydratorEnabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"impersonationEnabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
commitclient "github.com/argoproj/argo-cd/v2/commitserver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/controller"
|
||||
"github.com/argoproj/argo-cd/v2/controller/sharding"
|
||||
@@ -58,10 +59,12 @@ func NewCommand() *cobra.Command {
|
||||
repoErrorGracePeriod int64
|
||||
repoServerAddress string
|
||||
repoServerTimeoutSeconds int
|
||||
commitServerAddress string
|
||||
selfHealTimeoutSeconds int
|
||||
selfHealBackoffTimeoutSeconds int
|
||||
selfHealBackoffFactor int
|
||||
selfHealBackoffCapSeconds int
|
||||
selfHealBackoffCooldownSeconds int
|
||||
syncTimeout int
|
||||
statusProcessors int
|
||||
operationProcessors int
|
||||
@@ -87,7 +90,8 @@ func NewCommand() *cobra.Command {
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
|
||||
// argocd k8s event logging flag
|
||||
enableK8sEvent []string
|
||||
enableK8sEvent []string
|
||||
hydratorEnabled bool
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -157,6 +161,8 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
repoClientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds, tlsConfig)
|
||||
|
||||
commitClientset := commitclient.NewCommitServerClientset(commitServerAddress)
|
||||
|
||||
cache, err := cacheSource()
|
||||
errors.CheckError(err)
|
||||
cache.Cache.SetClient(cacheutil.NewTwoLevelClient(cache.Cache.GetClient(), 10*time.Minute))
|
||||
@@ -183,6 +189,7 @@ func NewCommand() *cobra.Command {
 				kubeClient,
 				appClient,
 				repoClientset,
+				commitClientset,
 				cache,
 				kubectl,
 				resyncDuration,
@@ -190,6 +197,7 @@ func NewCommand() *cobra.Command {
 				time.Duration(appResyncJitter)*time.Second,
 				time.Duration(selfHealTimeoutSeconds)*time.Second,
 				selfHealBackoff,
+				time.Duration(selfHealBackoffCooldownSeconds)*time.Second,
 				time.Duration(syncTimeout)*time.Second,
 				time.Duration(repoErrorGracePeriod)*time.Second,
 				metricsPort,
@@ -205,6 +213,7 @@ func NewCommand() *cobra.Command {
 				enableDynamicClusterDistribution,
 				ignoreNormalizerOpts,
 				enableK8sEvent,
+				hydratorEnabled,
 			)
 			errors.CheckError(err)
 			cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer(), nil)
@@ -247,6 +256,7 @@ func NewCommand() *cobra.Command {
 	command.Flags().Int64Var(&repoErrorGracePeriod, "repo-error-grace-period-seconds", int64(env.ParseDurationFromEnv("ARGOCD_REPO_ERROR_GRACE_PERIOD_SECONDS", defaultAppResyncPeriod*time.Second, 0, math.MaxInt64).Seconds()), "Grace period in seconds for ignoring consecutive errors while communicating with repo server.")
 	command.Flags().StringVar(&repoServerAddress, "repo-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Repo server address.")
 	command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
+	command.Flags().StringVar(&commitServerAddress, "commit-server", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_COMMIT_SERVER", common.DefaultCommitServerAddr), "Commit server address.")
 	command.Flags().IntVar(&statusProcessors, "status-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS", 20, 0, math.MaxInt32), "Number of application status processors")
 	command.Flags().IntVar(&operationProcessors, "operation-processors", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_OPERATION_PROCESSORS", 10, 0, math.MaxInt32), "Number of application operation processors")
 	command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
@@ -258,6 +268,7 @@ func NewCommand() *cobra.Command {
 	command.Flags().IntVar(&selfHealBackoffTimeoutSeconds, "self-heal-backoff-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_TIMEOUT_SECONDS", 2, 0, math.MaxInt32), "Specifies initial timeout of exponential backoff between self heal attempts")
 	command.Flags().IntVar(&selfHealBackoffFactor, "self-heal-backoff-factor", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_FACTOR", 3, 0, math.MaxInt32), "Specifies factor of exponential timeout between application self heal attempts")
 	command.Flags().IntVar(&selfHealBackoffCapSeconds, "self-heal-backoff-cap-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_CAP_SECONDS", 300, 0, math.MaxInt32), "Specifies max timeout of exponential backoff between application self heal attempts")
+	command.Flags().IntVar(&selfHealBackoffCooldownSeconds, "self-heal-backoff-cooldown-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_COOLDOWN_SECONDS", 330, 0, math.MaxInt32), "Specifies period of time the app needs to stay synced before the self heal backoff can reset")
 	command.Flags().IntVar(&syncTimeout, "sync-timeout", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SYNC_TIMEOUT", 0, 0, math.MaxInt32), "Specifies the timeout after which a sync would be terminated. 0 means no timeout (default 0).")
 	command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
 	command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
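With the defaults above, self-heal retries back off roughly as 2s, 6s, 18s, 54s, 162s and are then capped at 300s; once the app has stayed synced for the cooldown period (330s by default) the backoff can reset. A sketch of the schedule these flags describe, using the k8s.io/apimachinery wait.Backoff type (the controller receives a *wait.Backoff as selfHealBackoff above; the exact construction is not shown in this diff):

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
    	// Mirrors the flag defaults: initial 2s, factor 3, cap 300s.
    	backoff := wait.Backoff{
    		Duration: 2 * time.Second,
    		Factor:   3,
    		Cap:      300 * time.Second,
    		Steps:    6,
    	}
    	for i := 0; i < 6; i++ {
    		// Prints roughly: 2s, 6s, 18s, 54s, 162s, then capped at 300s.
    		fmt.Println(backoff.Step())
    	}
    }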
@@ -285,7 +296,7 @@ func NewCommand() *cobra.Command {
 	command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout-seconds", env.ParseDurationFromEnv("ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT", 0*time.Second, 0, math.MaxInt64), "Set ignore normalizer JQ execution timeout")
 	// argocd k8s event logging flag
 	command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
+	command.Flags().BoolVar(&hydratorEnabled, "hydrator-enabled", env.ParseBoolFromEnv("ARGOCD_HYDRATOR_ENABLED", false), "Feature flag to enable Hydrator. Default (\"false\")")
 	cacheSource = appstatecache.AddCacheFlagsToCmd(&command, cacheutil.Options{
 		OnClientCreated: func(client *redis.Client) {
 			redisClient = client
@@ -74,6 +74,7 @@ func NewCommand() *cobra.Command {
 		enableScmProviders bool
 		webhookParallelism int
 		tokenRefStrictMode bool
+		maxResourcesStatusCount int
 	)
 	scheme := runtime.NewScheme()
 	_ = clientgoscheme.AddToScheme(scheme)
@@ -227,6 +228,7 @@ func NewCommand() *cobra.Command {
 		GlobalPreservedAnnotations: globalPreservedAnnotations,
 		GlobalPreservedLabels:      globalPreservedLabels,
 		Metrics:                    &metrics,
+		MaxResourcesStatusCount:    maxResourcesStatusCount,
 	}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
 		log.Error(err, "unable to create controller", "controller", "ApplicationSet")
 		os.Exit(1)
@@ -270,6 +272,7 @@ func NewCommand() *cobra.Command {
 	command.Flags().StringSliceVar(&globalPreservedLabels, "preserved-labels", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS", []string{}, ","), "Sets global preserved field values for labels")
 	command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
 	command.Flags().StringSliceVar(&metricsAplicationsetLabels, "metrics-applicationset-labels", []string{}, "List of Application labels that will be added to the argocd_applicationset_labels metric")
+	command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 0, 0, math.MaxInt), "Max number of resources stored in appset status.")
 	return &command
 }
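The new --max-resources-status-count flag caps how many resources the ApplicationSet controller records in the AppSet status. The cap itself is just a slice truncation; a sketch of the general shape (the controller's actual policy for the 0 default is not shown in this diff, so the code below treats only positive values as a cap, purely for illustration):

    // Sketch: cap a status list at a configured maximum.
    func truncateStatus[T any](resources []T, max int) []T {
    	if max > 0 && len(resources) > max {
    		return resources[:max]
    	}
    	return resources
    }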
cmd/argocd-commit-server/commands/argocd_commit_server.go (new file, 117 lines)
@@ -0,0 +1,117 @@
+package commands
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"google.golang.org/grpc/health/grpc_health_v1"
+
+	cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
+	"github.com/argoproj/argo-cd/v2/commitserver"
+	"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
+	"github.com/argoproj/argo-cd/v2/commitserver/metrics"
+	"github.com/argoproj/argo-cd/v2/common"
+	"github.com/argoproj/argo-cd/v2/util/askpass"
+	"github.com/argoproj/argo-cd/v2/util/cli"
+	"github.com/argoproj/argo-cd/v2/util/env"
+	"github.com/argoproj/argo-cd/v2/util/errors"
+	"github.com/argoproj/argo-cd/v2/util/healthz"
+	ioutil "github.com/argoproj/argo-cd/v2/util/io"
+)
+
+// NewCommand returns a new instance of an argocd-commit-server command
+func NewCommand() *cobra.Command {
+	var (
+		listenHost  string
+		listenPort  int
+		metricsPort int
+		metricsHost string
+	)
+	command := &cobra.Command{
+		Use:   "argocd-commit-server",
+		Short: "Run Argo CD Commit Server",
+		Long:  "Argo CD Commit Server is an internal service which commits and pushes hydrated manifests to git. This command runs Commit Server in the foreground.",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			vers := common.GetVersion()
+			vers.LogStartupInfo(
+				"Argo CD Commit Server",
+				map[string]any{
+					"port": listenPort,
+				},
+			)
+
+			cli.SetLogFormat(cmdutil.LogFormat)
+			cli.SetLogLevel(cmdutil.LogLevel)
+
+			metricsServer := metrics.NewMetricsServer()
+			http.Handle("/metrics", metricsServer.GetHandler())
+			go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), nil)) }()
+
+			askPassServer := askpass.NewServer(askpass.CommitServerSocketPath)
+			go func() { errors.CheckError(askPassServer.Run()) }()
+
+			server := commitserver.NewServer(askPassServer, metricsServer)
+			grpc := server.CreateGRPC()
+
+			listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", listenHost, listenPort))
+			errors.CheckError(err)
+
+			healthz.ServeHealthCheck(http.DefaultServeMux, func(r *http.Request) error {
+				if val, ok := r.URL.Query()["full"]; ok && len(val) > 0 && val[0] == "true" {
+					// connect to itself to make sure commit server is able to serve connection
+					// used by liveness probe to auto restart commit server
+					conn, err := apiclient.NewConnection(fmt.Sprintf("localhost:%d", listenPort))
+					if err != nil {
+						return err
+					}
+					defer ioutil.Close(conn)
+					client := grpc_health_v1.NewHealthClient(conn)
+					res, err := client.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{})
+					if err != nil {
+						return err
+					}
+					if res.Status != grpc_health_v1.HealthCheckResponse_SERVING {
+						return fmt.Errorf("grpc health check status is '%v'", res.Status)
+					}
+					return nil
+				}
+				return nil
+			})
+
+			// Graceful shutdown code adapted from here: https://gist.github.com/embano1/e0bf49d24f1cdd07cffad93097c04f0a
+			sigCh := make(chan os.Signal, 1)
+			signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
+			wg := sync.WaitGroup{}
+			wg.Add(1)
+			go func() {
+				s := <-sigCh
+				log.Printf("got signal %v, attempting graceful shutdown", s)
+				grpc.GracefulStop()
+				wg.Done()
+			}()
+
+			log.Println("starting grpc server")
+			err = grpc.Serve(listener)
+			errors.CheckError(err)
+			wg.Wait()
+			log.Println("clean shutdown")
+
+			return nil
+		},
+	}
+	command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_COMMIT_SERVER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
+	command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_COMMIT_SERVER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
+	command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_COMMIT_SERVER_LISTEN_ADDRESS", common.DefaultAddressCommitServer), "Listen on given address for incoming connections")
+	command.Flags().IntVar(&listenPort, "port", common.DefaultPortCommitServer, "Listen on given port for incoming connections")
+	command.Flags().StringVar(&metricsHost, "metrics-address", env.StringFromEnv("ARGOCD_COMMIT_SERVER_METRICS_LISTEN_ADDRESS", common.DefaultAddressCommitServerMetrics), "Listen on given address for metrics")
+	command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortCommitServerMetrics, "Start metrics server on given port")
+
+	return command
+}
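Because healthz.ServeHealthCheck registers on http.DefaultServeMux, the health endpoint is served from the same listener as /metrics, and passing ?full=true makes the server dial its own gRPC endpoint. A probe sketch (the metrics port number here is an assumption; use whatever --metrics-port / common.DefaultPortCommitServerMetrics resolves to in your deployment):

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    )

    func main() {
    	// Liveness-style probe: /healthz?full=true exercises the gRPC path too.
    	resp, err := http.Get("http://localhost:8087/healthz?full=true") // assumed port
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	body, _ := io.ReadAll(resp.Body)
    	fmt.Println(resp.Status, string(body))
    }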
@@ -9,7 +9,7 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"

-	"github.com/argoproj/argo-cd/v2/reposerver/askpass"
+	"github.com/argoproj/argo-cd/v2/util/askpass"
 	"github.com/argoproj/argo-cd/v2/util/errors"
 	grpc_util "github.com/argoproj/argo-cd/v2/util/grpc"
 	"github.com/argoproj/argo-cd/v2/util/io"
@@ -23,10 +23,10 @@ import (
 	"github.com/argoproj/argo-cd/v2/common"
 	"github.com/argoproj/argo-cd/v2/reposerver"
 	"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
-	"github.com/argoproj/argo-cd/v2/reposerver/askpass"
 	reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache"
 	"github.com/argoproj/argo-cd/v2/reposerver/metrics"
 	"github.com/argoproj/argo-cd/v2/reposerver/repository"
+	"github.com/argoproj/argo-cd/v2/util/askpass"
 	cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
 	"github.com/argoproj/argo-cd/v2/util/cli"
 	"github.com/argoproj/argo-cd/v2/util/env"
@@ -87,6 +87,7 @@ func NewCommand() *cobra.Command {
 		applicationNamespaces []string
 		enableProxyExtension  bool
 		webhookParallelism    int
+		hydratorEnabled       bool

 		// ApplicationSet
 		enableNewGitFileGlobbing bool
@@ -243,6 +244,7 @@ func NewCommand() *cobra.Command {
 				EnableProxyExtension: enableProxyExtension,
 				WebhookParallelism:   webhookParallelism,
 				EnableK8sEvent:       enableK8sEvent,
+				HydratorEnabled:      hydratorEnabled,
 			}

 			appsetOpts := server.ApplicationSetOpts{
@@ -321,6 +323,7 @@ func NewCommand() *cobra.Command {
 	command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
 	command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_SERVER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
 	command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
+	command.Flags().BoolVar(&hydratorEnabled, "hydrator-enabled", env.ParseBoolFromEnv("ARGOCD_HYDRATOR_ENABLED", false), "Feature flag to enable Hydrator. Default (\"false\")")

 	// Flags related to the applicationSet component.
 	command.Flags().StringVar(&scmRootCAPath, "appset-scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")
@@ -16,12 +16,13 @@ import (
 	"golang.org/x/term"
 	"sigs.k8s.io/yaml"

+	"github.com/argoproj/argo-cd/v2/util/rbac"
+
 	"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
 	"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/utils"
 	argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
 	accountpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/account"
 	"github.com/argoproj/argo-cd/v2/pkg/apiclient/session"
-	"github.com/argoproj/argo-cd/v2/server/rbacpolicy"
 	"github.com/argoproj/argo-cd/v2/util/cli"
 	"github.com/argoproj/argo-cd/v2/util/errors"
 	"github.com/argoproj/argo-cd/v2/util/io"
@@ -218,7 +219,7 @@ argocd account can-i create clusters '*'

 Actions: %v
 Resources: %v
-`, rbacpolicy.Actions, rbacpolicy.Resources),
+`, rbac.Actions, rbac.Resources),
 		Run: func(c *cobra.Command, args []string) {
 			ctx := c.Context()
@@ -262,7 +263,7 @@ func NewAccountListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
 		Use:     "list",
 		Short:   "List accounts",
 		Example: "argocd account list",
-		Run: func(c *cobra.Command, args []string) {
+		Run: func(c *cobra.Command, _ []string) {
 			ctx := c.Context()

 			conn, client := headless.NewClientOrDie(clientOpts, c).NewAccountClientOrDie()
@@ -309,7 +310,7 @@ argocd account get

 # Get details for an account by name
 argocd account get --account <account-name>`,
-		Run: func(c *cobra.Command, args []string) {
+		Run: func(c *cobra.Command, _ []string) {
 			ctx := c.Context()

 			clientset := headless.NewClientOrDie(clientOpts, c)
@@ -358,7 +359,7 @@ func printAccountDetails(acc *accountpkg.Account) {
 		expiresAt := time.Unix(t.ExpiresAt, 0)
 		expiresAtFormatted = expiresAt.Format(time.RFC3339)
 		if expiresAt.Before(time.Now()) {
-			expiresAtFormatted = fmt.Sprintf("%s (expired)", expiresAtFormatted)
+			expiresAtFormatted = expiresAtFormatted + " (expired)"
 		}
 	}
@@ -382,7 +383,7 @@ argocd account generate-token

 # Generate token for the account with the specified name
 argocd account generate-token --account <account-name>`,
-		Run: func(c *cobra.Command, args []string) {
+		Run: func(c *cobra.Command, _ []string) {
 			ctx := c.Context()

 			clientset := headless.NewClientOrDie(clientOpts, c)
@@ -9,6 +9,7 @@ import (
 	"sort"
 	"time"

+	"github.com/argoproj/gitops-engine/pkg/utils/kube"
 	"github.com/spf13/cobra"
 	apiv1 "k8s.io/api/core/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -402,7 +403,26 @@ func reconcileApplications(
 	)

 	appStateManager := controller.NewAppStateManager(
-		argoDB, appClientset, repoServerClient, namespace, kubeutil.NewKubectl(), settingsMgr, stateCache, projInformer, server, cache, time.Second, argo.NewResourceTracking(), false, 0, serverSideDiff, ignoreNormalizerOpts)
+		argoDB,
+		appClientset,
+		repoServerClient,
+		namespace,
+		kubeutil.NewKubectl(),
+		func(_ string) (kube.CleanupFunc, error) {
+			return func() {}, nil
+		},
+		settingsMgr,
+		stateCache,
+		projInformer,
+		server,
+		cache,
+		time.Second,
+		argo.NewResourceTracking(),
+		false,
+		0,
+		serverSideDiff,
+		ignoreNormalizerOpts,
+	)

 	appsList, err := appClientset.ArgoprojV1alpha1().Applications(namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
 	if err != nil {
@@ -91,7 +91,7 @@ func TestGetReconcileResults_Refresh(t *testing.T) {

 	appClientset := appfake.NewSimpleClientset(app, proj)
 	deployment := test.NewDeployment()
-	kubeClientset := kubefake.NewSimpleClientset(deployment, &cm)
+	kubeClientset := kubefake.NewClientset(deployment, &cm)
 	clusterCache := clustermocks.ClusterCache{}
 	clusterCache.On("IsNamespaced", mock.Anything).Return(true, nil)
 	clusterCache.On("GetGVKParser", mock.Anything).Return(nil)
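The NewSimpleClientset-to-NewClientset swaps throughout these tests track client-go itself: fake.NewClientset is the newer constructor in k8s.io/client-go (it produces a fake clientset that tracks managed fields, which NewSimpleClientset does not). A minimal sketch of the call shape:

    package main

    import (
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes/fake"
    )

    func main() {
    	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "argocd-cm", Namespace: "argocd"}}
    	_ = fake.NewClientset(cm) // preferred over fake.NewSimpleClientset(cm) in newer client-go
    }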
@@ -183,13 +183,12 @@ func getControllerReplicas(ctx context.Context, kubeClient *kubernetes.Clientset

 func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 	var (
-		shard               int
-		replicas            int
-		shardingAlgorithm   string
-		clientConfig        clientcmd.ClientConfig
-		cacheSrc            func() (*appstatecache.Cache, error)
-		portForwardRedis    bool
-		redisCompressionStr string
+		shard             int
+		replicas          int
+		shardingAlgorithm string
+		clientConfig      clientcmd.ClientConfig
+		cacheSrc          func() (*appstatecache.Cache, error)
+		portForwardRedis  bool
 	)
 	command := cobra.Command{
 		Use: "shards",
@@ -213,7 +212,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
 			if replicas == 0 {
 				return
 			}
-			clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
+			clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, clientOpts.RedisCompression)
 			errors.CheckError(err)
 			if len(clusters) == 0 {
 				return
@@ -234,7 +233,6 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
 	// we can ignore unchecked error here as the command will be parsed again and checked when command.Execute() is run later
 	// nolint:errcheck
 	command.ParseFlags(os.Args[1:])
-	redisCompressionStr, _ = command.Flags().GetString(cacheutil.CLIFlagRedisCompress)
 	return &command
 }
@@ -466,13 +464,12 @@ func NewClusterDisableNamespacedMode() *cobra.Command {

 func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 	var (
-		shard               int
-		replicas            int
-		shardingAlgorithm   string
-		clientConfig        clientcmd.ClientConfig
-		cacheSrc            func() (*appstatecache.Cache, error)
-		portForwardRedis    bool
-		redisCompressionStr string
+		shard             int
+		replicas          int
+		shardingAlgorithm string
+		clientConfig      clientcmd.ClientConfig
+		cacheSrc          func() (*appstatecache.Cache, error)
+		portForwardRedis  bool
 	)
 	command := cobra.Command{
 		Use: "stats",
@@ -502,7 +499,7 @@ argocd admin cluster stats target-cluster`,
 				replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
 				errors.CheckError(err)
 			}
-			clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
+			clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, clientOpts.RedisCompression)
 			errors.CheckError(err)

 			w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
@@ -524,7 +521,6 @@ argocd admin cluster stats target-cluster`,
 	// we can ignore unchecked error here as the command will be parsed again and checked when command.Execute() is run later
 	// nolint:errcheck
 	command.ParseFlags(os.Args[1:])
-	redisCompressionStr, _ = command.Flags().GetString(cacheutil.CLIFlagRedisCompress)
 	return &command
 }
@@ -617,7 +613,7 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
 		clientConfig := clientcmd.NewDefaultClientConfig(*cfgAccess, &overrides)
 		conf, err := clientConfig.ClientConfig()
 		errors.CheckError(err)
-		kubeClientset := fake.NewSimpleClientset()
+		kubeClientset := fake.NewClientset()

 		var awsAuthConf *v1alpha1.AWSAuthConfig
 		var execProviderConf *v1alpha1.ExecProviderConfig
@@ -12,17 +12,14 @@ import (
 	"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
 	"github.com/argoproj/argo-cd/v2/common"
 	argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
-	"github.com/argoproj/argo-cd/v2/util/cache"
-	"github.com/argoproj/argo-cd/v2/util/env"
 	"github.com/argoproj/argo-cd/v2/util/errors"
 )

 func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 	var (
-		port           int
-		address        string
-		compressionStr string
-		clientConfig   clientcmd.ClientConfig
+		port         int
+		address      string
+		clientConfig clientcmd.ClientConfig
 	)
 	cmd := &cobra.Command{
 		Use: "dashboard",
@@ -30,10 +27,8 @@ func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
 		Run: func(cmd *cobra.Command, args []string) {
 			ctx := cmd.Context()

-			compression, err := cache.CompressionTypeFromString(compressionStr)
-			errors.CheckError(err)
 			clientOpts.Core = true
-			errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression, clientConfig))
+			errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, clientConfig))
 			println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port))
 			<-ctx.Done()
 		},
@@ -50,6 +45,5 @@ $ argocd admin dashboard --redis-compress gzip
 	clientConfig = cli.AddKubectlFlagsToSet(cmd.Flags())
 	cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
 	cmd.Flags().StringVar(&address, "address", common.DefaultAddressAdminDashboard, "Listen on given address")
-	cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
 	return cmd
 }
@@ -150,7 +150,7 @@ func NewGenRepoSpecCommand() *cobra.Command {
 			},
 		},
 	}
-	kubeClientset := fake.NewSimpleClientset(argoCDCM)
+	kubeClientset := fake.NewClientset(argoCDCM)
 	settingsMgr := settings.NewSettingsManager(ctx, kubeClientset, ArgoCDNamespace)
 	argoDB := db.NewDB(ArgoCDNamespace, settingsMgr, kubeClientset)
@@ -119,7 +119,7 @@ func (opts *settingsOpts) createSettingsManager(ctx context.Context) (*settings.
 		}
 	}
 	setSettingsMeta(argocdSecret)
-	clientset := fake.NewSimpleClientset(argocdSecret, argocdCM)
+	clientset := fake.NewClientset(argocdSecret, argocdCM)

 	manager := settings.NewSettingsManager(ctx, clientset, "default")
 	errors.CheckError(manager.ResyncInformers())
@@ -9,13 +9,12 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
 	"sigs.k8s.io/yaml"

 	"github.com/argoproj/argo-cd/v2/common"
-	"github.com/argoproj/argo-cd/v2/server/rbacpolicy"
 	"github.com/argoproj/argo-cd/v2/util/assets"
 	"github.com/argoproj/argo-cd/v2/util/cli"
 	"github.com/argoproj/argo-cd/v2/util/errors"
@@ -28,94 +27,85 @@ type rbacTrait struct {
 	allowPath bool
 }

-// Provide a mapping of short-hand resource names to their RBAC counterparts
+// Provide a mapping of shorthand resource names to their RBAC counterparts
 var resourceMap = map[string]string{
-	"account":         rbacpolicy.ResourceAccounts,
-	"app":             rbacpolicy.ResourceApplications,
-	"apps":            rbacpolicy.ResourceApplications,
-	"application":     rbacpolicy.ResourceApplications,
-	"applicationsets": rbacpolicy.ResourceApplicationSets,
-	"cert":            rbacpolicy.ResourceCertificates,
-	"certs":           rbacpolicy.ResourceCertificates,
-	"certificate":     rbacpolicy.ResourceCertificates,
-	"cluster":         rbacpolicy.ResourceClusters,
-	"extension":       rbacpolicy.ResourceExtensions,
-	"gpgkey":          rbacpolicy.ResourceGPGKeys,
-	"key":             rbacpolicy.ResourceGPGKeys,
-	"log":             rbacpolicy.ResourceLogs,
-	"logs":            rbacpolicy.ResourceLogs,
-	"exec":            rbacpolicy.ResourceExec,
-	"proj":            rbacpolicy.ResourceProjects,
-	"projs":           rbacpolicy.ResourceProjects,
-	"project":         rbacpolicy.ResourceProjects,
-	"repo":            rbacpolicy.ResourceRepositories,
-	"repos":           rbacpolicy.ResourceRepositories,
-	"repository":      rbacpolicy.ResourceRepositories,
-}
-
-var projectScoped = map[string]bool{
-	rbacpolicy.ResourceApplications:    true,
-	rbacpolicy.ResourceApplicationSets: true,
-	rbacpolicy.ResourceLogs:            true,
-	rbacpolicy.ResourceExec:            true,
-	rbacpolicy.ResourceClusters:        true,
-	rbacpolicy.ResourceRepositories:    true,
+	"account":         rbac.ResourceAccounts,
+	"app":             rbac.ResourceApplications,
+	"apps":            rbac.ResourceApplications,
+	"application":     rbac.ResourceApplications,
+	"applicationsets": rbac.ResourceApplicationSets,
+	"cert":            rbac.ResourceCertificates,
+	"certs":           rbac.ResourceCertificates,
+	"certificate":     rbac.ResourceCertificates,
+	"cluster":         rbac.ResourceClusters,
+	"extension":       rbac.ResourceExtensions,
+	"gpgkey":          rbac.ResourceGPGKeys,
+	"key":             rbac.ResourceGPGKeys,
+	"log":             rbac.ResourceLogs,
+	"logs":            rbac.ResourceLogs,
+	"exec":            rbac.ResourceExec,
+	"proj":            rbac.ResourceProjects,
+	"projs":           rbac.ResourceProjects,
+	"project":         rbac.ResourceProjects,
+	"repo":            rbac.ResourceRepositories,
+	"repos":           rbac.ResourceRepositories,
+	"repository":      rbac.ResourceRepositories,
 }

 // List of allowed RBAC resources
 var validRBACResourcesActions = map[string]actionTraitMap{
-	rbacpolicy.ResourceAccounts:        accountsActions,
-	rbacpolicy.ResourceApplications:    applicationsActions,
-	rbacpolicy.ResourceApplicationSets: defaultCRUDActions,
-	rbacpolicy.ResourceCertificates:    defaultCRDActions,
-	rbacpolicy.ResourceClusters:        defaultCRUDActions,
-	rbacpolicy.ResourceExtensions:      extensionActions,
-	rbacpolicy.ResourceGPGKeys:         defaultCRDActions,
-	rbacpolicy.ResourceLogs:            logsActions,
-	rbacpolicy.ResourceExec:            execActions,
-	rbacpolicy.ResourceProjects:        defaultCRUDActions,
-	rbacpolicy.ResourceRepositories:    defaultCRUDActions,
+	rbac.ResourceAccounts:        accountsActions,
+	rbac.ResourceApplications:    applicationsActions,
+	rbac.ResourceApplicationSets: defaultCRUDActions,
+	rbac.ResourceCertificates:    defaultCRDActions,
+	rbac.ResourceClusters:        defaultCRUDActions,
+	rbac.ResourceExtensions:      extensionActions,
+	rbac.ResourceGPGKeys:         defaultCRDActions,
+	rbac.ResourceLogs:            logsActions,
+	rbac.ResourceExec:            execActions,
+	rbac.ResourceProjects:        defaultCRUDActions,
+	rbac.ResourceRepositories:    defaultCRUDActions,
 }

 // List of allowed RBAC actions
 var defaultCRUDActions = actionTraitMap{
-	rbacpolicy.ActionCreate: rbacTrait{},
-	rbacpolicy.ActionGet:    rbacTrait{},
-	rbacpolicy.ActionUpdate: rbacTrait{},
-	rbacpolicy.ActionDelete: rbacTrait{},
+	rbac.ActionCreate: rbacTrait{},
+	rbac.ActionGet:    rbacTrait{},
+	rbac.ActionUpdate: rbacTrait{},
+	rbac.ActionDelete: rbacTrait{},
 }

 var defaultCRDActions = actionTraitMap{
-	rbacpolicy.ActionCreate: rbacTrait{},
-	rbacpolicy.ActionGet:    rbacTrait{},
-	rbacpolicy.ActionDelete: rbacTrait{},
+	rbac.ActionCreate: rbacTrait{},
+	rbac.ActionGet:    rbacTrait{},
+	rbac.ActionDelete: rbacTrait{},
 }

 var applicationsActions = actionTraitMap{
-	rbacpolicy.ActionCreate:   rbacTrait{},
-	rbacpolicy.ActionGet:      rbacTrait{},
-	rbacpolicy.ActionUpdate:   rbacTrait{allowPath: true},
-	rbacpolicy.ActionDelete:   rbacTrait{allowPath: true},
-	rbacpolicy.ActionAction:   rbacTrait{allowPath: true},
-	rbacpolicy.ActionOverride: rbacTrait{},
-	rbacpolicy.ActionSync:     rbacTrait{},
+	rbac.ActionCreate:   rbacTrait{},
+	rbac.ActionGet:      rbacTrait{},
+	rbac.ActionUpdate:   rbacTrait{allowPath: true},
+	rbac.ActionDelete:   rbacTrait{allowPath: true},
+	rbac.ActionAction:   rbacTrait{allowPath: true},
+	rbac.ActionOverride: rbacTrait{},
+	rbac.ActionSync:     rbacTrait{},
 }

 var accountsActions = actionTraitMap{
-	rbacpolicy.ActionCreate: rbacTrait{},
-	rbacpolicy.ActionUpdate: rbacTrait{},
+	rbac.ActionCreate: rbacTrait{},
+	rbac.ActionUpdate: rbacTrait{},
 }

 var execActions = actionTraitMap{
-	rbacpolicy.ActionCreate: rbacTrait{},
+	rbac.ActionCreate: rbacTrait{},
 }

 var logsActions = actionTraitMap{
-	rbacpolicy.ActionGet: rbacTrait{},
+	rbac.ActionGet: rbacTrait{},
 }

 var extensionActions = actionTraitMap{
-	rbacpolicy.ActionInvoke: rbacTrait{},
+	rbac.ActionInvoke: rbacTrait{},
 }

 // NewRBACCommand is the command for 'rbac'
@@ -226,7 +216,7 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
 			// even if there is no explicit RBAC allow, or if there is an explicit RBAC deny)
 			var isLogRbacEnforced func() bool
 			if nsOverride && policyFile == "" {
-				if resolveRBACResourceName(resource) == rbacpolicy.ResourceLogs {
+				if resolveRBACResourceName(resource) == rbac.ResourceLogs {
 					isLogRbacEnforced = func() bool {
 						if opts, ok := cmdCtx.(*settingsOpts); ok {
 							opts.loadClusterSettings = true
@@ -248,12 +238,11 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
 					fmt.Println("Yes")
 				}
 				os.Exit(0)
-			} else {
-				if !quiet {
-					fmt.Println("No")
-				}
-				os.Exit(1)
 			}
+			if !quiet {
+				fmt.Println("No")
+			}
+			os.Exit(1)
 		},
 	}
 	clientConfig = cli.AddKubectlFlagsToCmd(command)
@@ -321,13 +310,11 @@ argocd admin settings rbac validate --namespace argocd
 				if err := rbac.ValidatePolicy(userPolicy); err == nil {
 					fmt.Printf("Policy is valid.\n")
 					os.Exit(0)
-				} else {
-					fmt.Printf("Policy is invalid: %v\n", err)
-					os.Exit(1)
 				}
-			} else {
-				log.Fatalf("Policy is empty or could not be loaded.")
+				fmt.Printf("Policy is invalid: %v\n", err)
+				os.Exit(1)
 			}
+			log.Fatalf("Policy is empty or could not be loaded.")
 		},
 	}
 	clientConfig = cli.AddKubectlFlagsToCmd(command)
@@ -402,7 +389,7 @@ func getPolicyFromConfigMap(cm *corev1.ConfigMap) (string, string, string) {

 // getPolicyConfigMap fetches the RBAC config map from K8s cluster
 func getPolicyConfigMap(ctx context.Context, client kubernetes.Interface, namespace string) (*corev1.ConfigMap, error) {
-	cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, common.ArgoCDRBACConfigMapName, v1.GetOptions{})
+	cm, err := client.CoreV1().ConfigMaps(namespace).Get(ctx, common.ArgoCDRBACConfigMapName, metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -448,12 +435,12 @@ func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPoli
 	// Some project scoped resources have a special notation - for simplicity's sake,
 	// if user gives no sub-resource (or specifies simple '*'), we construct
 	// the required notation by setting subresource to '*/*'.
-	if projectScoped[realResource] {
+	if rbac.ProjectScoped[realResource] {
 		if subResource == "*" || subResource == "" {
 			subResource = "*/*"
 		}
 	}
-	if realResource == rbacpolicy.ResourceLogs {
+	if realResource == rbac.ResourceLogs {
 		if isLogRbacEnforced != nil && !isLogRbacEnforced() {
 			return true
 		}
@@ -466,9 +453,8 @@ func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPoli
 func resolveRBACResourceName(name string) string {
 	if res, ok := resourceMap[name]; ok {
 		return res
-	} else {
-		return name
 	}
+	return name
 }

 // validateRBACResourceAction checks whether a given resource is a valid RBAC resource.
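The shorthand table above is what lets `argocd admin settings rbac can` accept aliases like `app` or `repos`. A self-contained illustration of the lookup (the trimmed map values here stand in for the util/rbac constants such as rbac.ResourceApplications):

    package main

    import "fmt"

    // Trimmed copy of the shorthand table for illustration only.
    var resourceMap = map[string]string{
    	"app":   "applications",
    	"repos": "repositories",
    }

    func resolveRBACResourceName(name string) string {
    	if res, ok := resourceMap[name]; ok {
    		return res
    	}
    	return name
    }

    func main() {
    	fmt.Println(resolveRBACResourceName("app"))      // applications
    	fmt.Println(resolveRBACResourceName("clusters")) // unknown names pass through unchanged
    }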
@@ -7,14 +7,15 @@ import (

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/fake"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

-	"github.com/argoproj/argo-cd/v2/server/rbacpolicy"
+	"github.com/argoproj/argo-cd/v2/util/rbac"

 	"github.com/argoproj/argo-cd/v2/util/assets"
 )
@@ -56,8 +57,8 @@ func Test_validateRBACResourceAction(t *testing.T) {
 		{
 			name: "Test valid resource and action",
 			args: args{
-				resource: rbacpolicy.ResourceApplications,
-				action:   rbacpolicy.ActionCreate,
+				resource: rbac.ResourceApplications,
+				action:   rbac.ActionCreate,
 			},
 			valid: true,
 		},
@@ -71,7 +72,7 @@ func Test_validateRBACResourceAction(t *testing.T) {
 		{
 			name: "Test invalid action",
 			args: args{
-				resource: rbacpolicy.ResourceApplications,
+				resource: rbac.ResourceApplications,
 				action:   "invalid",
 			},
 			valid: false,
@@ -79,24 +80,24 @@ func Test_validateRBACResourceAction(t *testing.T) {
 		{
 			name: "Test invalid action for resource",
 			args: args{
-				resource: rbacpolicy.ResourceLogs,
-				action:   rbacpolicy.ActionCreate,
+				resource: rbac.ResourceLogs,
+				action:   rbac.ActionCreate,
 			},
 			valid: false,
 		},
 		{
 			name: "Test valid action with path",
 			args: args{
-				resource: rbacpolicy.ResourceApplications,
-				action:   rbacpolicy.ActionAction + "/apps/Deployment/restart",
+				resource: rbac.ResourceApplications,
+				action:   rbac.ActionAction + "/apps/Deployment/restart",
 			},
 			valid: true,
 		},
 		{
 			name: "Test invalid action with path",
 			args: args{
-				resource: rbacpolicy.ResourceApplications,
-				action:   rbacpolicy.ActionGet + "/apps/Deployment/restart",
+				resource: rbac.ResourceApplications,
+				action:   rbac.ActionGet + "/apps/Deployment/restart",
 			},
 			valid: false,
 		},
@@ -147,7 +148,7 @@ func Test_PolicyFromK8s(t *testing.T) {
 	ctx := context.Background()

 	require.NoError(t, err)
-	kubeclientset := fake.NewSimpleClientset(&v1.ConfigMap{
+	kubeclientset := fake.NewClientset(&corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "argocd-rbac-cm",
 			Namespace: "argocd",
@@ -280,7 +281,7 @@ p, role:user, logs, get, .*/.*, allow
 p, role:user, exec, create, .*/.*, allow
 `

-	kubeclientset := fake.NewSimpleClientset(&v1.ConfigMap{
+	kubeclientset := fake.NewClientset(&corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "argocd-rbac-cm",
 			Namespace: "argocd",
@@ -45,7 +45,7 @@ func captureStdout(callback func()) (string, error) {
 func newSettingsManager(data map[string]string) *settings.SettingsManager {
 	ctx := context.Background()

-	clientset := fake.NewSimpleClientset(&v1.ConfigMap{
+	clientset := fake.NewClientset(&v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "default",
 			Name:      common.ArgoCDConfigMapName,
@@ -69,7 +69,7 @@ func newSettingsManager(data map[string]string) *settings.SettingsManager {

 type fakeCmdContext struct {
 	mgr *settings.SettingsManager
-	// nolint:unused,structcheck
+	// nolint:unused
 	out bytes.Buffer
 }
@@ -112,6 +112,7 @@ type watchOpts struct {
 	suspended bool
 	degraded  bool
 	delete    bool
+	hydrated  bool
 }

 // NewApplicationCreateCommand returns a new instance of an `argocd app create` command
@@ -1883,6 +1884,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 	command.Flags().BoolVar(&watch.suspended, "suspended", false, "Wait for suspended")
 	command.Flags().BoolVar(&watch.degraded, "degraded", false, "Wait for degraded")
 	command.Flags().BoolVar(&watch.delete, "delete", false, "Wait for delete")
+	command.Flags().BoolVar(&watch.hydrated, "hydrated", false, "Wait for hydration operations")
 	command.Flags().StringVarP(&selector, "selector", "l", "", "Wait for apps by label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.")
 	command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator))
 	command.Flags().BoolVar(&watch.operation, "operation", false, "Wait for pending operations")
@@ -2450,7 +2452,7 @@ func groupResourceStates(app *argoappv1.Application, selectedResources []*argoap
 }

 // check if resource health, sync and operation statuses matches watch options
-func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string, operationStatus *argoappv1.Operation) bool {
+func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string, operationStatus *argoappv1.Operation, hydrationFinished bool) bool {
 	if watch.delete {
 		return false
 	}
@@ -2480,7 +2482,8 @@ func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string

 	synced := !watch.sync || syncStatus == string(argoappv1.SyncStatusCodeSynced)
 	operational := !watch.operation || operationStatus == nil
-	return synced && healthCheckPassed && operational
+	hydrated := !watch.hydrated || hydrationFinished
+	return synced && healthCheckPassed && operational && hydrated
 }
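The new parameter makes --hydrated just one more gate in the readiness predicate: an app that is synced and healthy but whose hydrate operation has not finished is still "not ready". A sketch of an extra case in the style of TestCheckResourceStatus further down (hypothetical addition; the identifiers are the ones that test file already uses):

    t.Run("Hydration pending", func(t *testing.T) {
    	// Synced and healthy, but hydration unfinished => not ready under --hydrated.
    	res := checkResourceStatus(watchOpts{health: true, hydrated: true},
    		string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), nil, false)
    	assert.False(t, res)
    })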
 // resourceParentChild gets the latest state of the app and the latest state of the app's resource tree and then
@@ -2644,13 +2647,15 @@ func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client,
 			}
 		}

+		hydrationFinished := app.Status.SourceHydrator.CurrentOperation != nil && app.Status.SourceHydrator.CurrentOperation.Phase == argoappv1.HydrateOperationPhaseHydrated && app.Status.SourceHydrator.CurrentOperation.SourceHydrator.DeepEquals(app.Status.SourceHydrator.LastSuccessfulOperation.SourceHydrator) && app.Status.SourceHydrator.CurrentOperation.DrySHA == app.Status.SourceHydrator.LastSuccessfulOperation.DrySHA
+
 		var selectedResourcesAreReady bool

 		// If selected resources are included, wait only on those resources, otherwise wait on the application as a whole.
 		if len(selectedResources) > 0 {
 			selectedResourcesAreReady = true
 			for _, state := range getResourceStates(app, selectedResources) {
-				resourceIsReady := checkResourceStatus(watch, state.Health, state.Status, appEvent.Application.Operation)
+				resourceIsReady := checkResourceStatus(watch, state.Health, state.Status, appEvent.Application.Operation, hydrationFinished)
 				if !resourceIsReady {
 					selectedResourcesAreReady = false
 					break
@@ -2658,7 +2663,7 @@ func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client,
 			}
 		} else {
 			// Wait on the application as a whole
-			selectedResourcesAreReady = checkResourceStatus(watch, string(app.Status.Health.Status), string(app.Status.Sync.Status), appEvent.Application.Operation)
+			selectedResourcesAreReady = checkResourceStatus(watch, string(app.Status.Health.Status), string(app.Status.Sync.Status), appEvent.Application.Operation, hydrationFinished)
 		}

 		if selectedResourcesAreReady && (!operationInProgress || !watch.operation) {
@@ -1705,7 +1705,7 @@ func TestCheckResourceStatus(t *testing.T) {
 			suspended: true,
 			health:    true,
 			degraded:  true,
-		}, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.True(t, res)
 	})
 	t.Run("Degraded, Suspended and health status failed", func(t *testing.T) {
@@ -1713,57 +1713,57 @@ func TestCheckResourceStatus(t *testing.T) {
 			suspended: true,
 			health:    true,
 			degraded:  true,
-		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.False(t, res)
 	})
 	t.Run("Suspended and health status passed", func(t *testing.T) {
 		res := checkResourceStatus(watchOpts{
 			suspended: true,
 			health:    true,
-		}, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.True(t, res)
 	})
 	t.Run("Suspended and health status failed", func(t *testing.T) {
 		res := checkResourceStatus(watchOpts{
 			suspended: true,
 			health:    true,
-		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.False(t, res)
 	})
 	t.Run("Suspended passed", func(t *testing.T) {
 		res := checkResourceStatus(watchOpts{
 			suspended: true,
 			health:    false,
-		}, string(health.HealthStatusSuspended), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusSuspended), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.True(t, res)
 	})
 	t.Run("Suspended failed", func(t *testing.T) {
 		res := checkResourceStatus(watchOpts{
 			suspended: true,
 			health:    false,
-		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.False(t, res)
 	})
 	t.Run("Health passed", func(t *testing.T) {
 		res := checkResourceStatus(watchOpts{
 			suspended: false,
 			health:    true,
-		}, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusHealthy), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.True(t, res)
 	})
 	t.Run("Health failed", func(t *testing.T) {
 		res := checkResourceStatus(watchOpts{
 			suspended: false,
 			health:    true,
-		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.False(t, res)
 	})
 	t.Run("Synced passed", func(t *testing.T) {
-		res := checkResourceStatus(watchOpts{}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		res := checkResourceStatus(watchOpts{}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.True(t, res)
 	})
 	t.Run("Synced failed", func(t *testing.T) {
-		res := checkResourceStatus(watchOpts{}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeOutOfSync), &v1alpha1.Operation{})
+		res := checkResourceStatus(watchOpts{}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeOutOfSync), &v1alpha1.Operation{}, true)
 		assert.True(t, res)
 	})
 	t.Run("Degraded passed", func(t *testing.T) {
@@ -1771,7 +1771,7 @@ func TestCheckResourceStatus(t *testing.T) {
 			suspended: false,
 			health:    false,
 			degraded:  true,
-		}, string(health.HealthStatusDegraded), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusDegraded), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.True(t, res)
 	})
 	t.Run("Degraded failed", func(t *testing.T) {
@@ -1779,7 +1779,7 @@ func TestCheckResourceStatus(t *testing.T) {
 			suspended: false,
 			health:    false,
 			degraded:  true,
-		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{})
+		}, string(health.HealthStatusProgressing), string(v1alpha1.SyncStatusCodeSynced), &v1alpha1.Operation{}, true)
 		assert.False(t, res)
 	})
 }
@@ -14,7 +14,8 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1 "k8s.io/api/core/v1"
+	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	runtimeUtil "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/dynamic"
@@ -128,7 +129,7 @@ func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.R
 		}
 		repoServerName := c.repoServerName
 		repoServererviceLabelSelector := common.LabelKeyComponentRepoServer + "=" + common.LabelValueComponentRepoServer
-		repoServerServices, err := c.kubeClientset.CoreV1().Services(c.namespace).List(context.Background(), v1.ListOptions{LabelSelector: repoServererviceLabelSelector})
+		repoServerServices, err := c.kubeClientset.CoreV1().Services(c.namespace).List(context.Background(), metaV1.ListOptions{LabelSelector: repoServererviceLabelSelector})
 		if err != nil {
 			c.err = err
 			return
@@ -176,7 +177,7 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
 //
 // If the clientOpts enables core mode, but the local config does not have core mode enabled, this function will
 // not start the local server.
-func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType, clientConfig clientcmd.ClientConfig) error {
+func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, clientConfig clientcmd.ClientConfig) error {
 	if clientConfig == nil {
 		flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
 		clientConfig = cli.AddKubectlFlagsToSet(flags)
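With the explicit compression parameter gone (it now comes from clientOpts.RedisCompression), the core-mode call site shrinks accordingly. A condensed usage sketch matching what NewClientOrDie does further down, where port, address, and client config may all be nil so defaults are resolved internally:

    // Start the embedded API server on the fly in core mode; no-op otherwise.
    if err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, nil); err != nil {
    	log.Fatal(err)
    }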
@@ -243,6 +244,10 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
 	if err != nil {
 		return fmt.Errorf("error adding argo resources to scheme: %w", err)
 	}
+	err = corev1.AddToScheme(scheme)
+	if err != nil {
+		return fmt.Errorf("error adding corev1 resources to scheme: %w", err)
+	}
 	controllerClientset, err := client.New(restConfig, client.Options{
 		Scheme: scheme,
 	})
@@ -265,7 +270,7 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
 		log.Warnf("Failed to fetch & set redis password for namespace %s: %v", namespace, err)
 	}

-	appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression, redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName, redisPassword: redisOptions.Password}), time.Hour)
+	appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: cache.RedisCompressionType(clientOpts.RedisCompression), redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName, redisPassword: redisOptions.Password}), time.Hour)
 	srv := server.NewServer(ctx, server.ArgoCDServerOpts{
 		EnableGZip: false,
 		Namespace:  namespace,
@@ -316,7 +321,7 @@ func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.C
 	ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context"))
 	// If we're in core mode, start the API server on the fly and configure the client `opts` to use it.
 	// If we're not in core mode, this function call will do nothing.
-	err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone, nil)
+	err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, nil)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -11,6 +11,7 @@ import (
 	cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
 	"github.com/argoproj/argo-cd/v2/common"
 	argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
+	"github.com/argoproj/argo-cd/v2/util/cache"
 	"github.com/argoproj/argo-cd/v2/util/cli"
 	"github.com/argoproj/argo-cd/v2/util/config"
 	"github.com/argoproj/argo-cd/v2/util/env"
@@ -87,6 +88,7 @@ func NewCommand() *cobra.Command {
 	command.PersistentFlags().StringVar(&clientOpts.RedisHaProxyName, "redis-haproxy-name", env.StringFromEnv(common.EnvRedisHaProxyName, common.DefaultRedisHaProxyName), fmt.Sprintf("Name of the Redis HA Proxy; set this or the %s environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisHaProxyName))
 	command.PersistentFlags().StringVar(&clientOpts.RedisName, "redis-name", env.StringFromEnv(common.EnvRedisName, common.DefaultRedisName), fmt.Sprintf("Name of the Redis deployment; set this or the %s environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisName))
 	command.PersistentFlags().StringVar(&clientOpts.RepoServerName, "repo-server-name", env.StringFromEnv(common.EnvRepoServerName, common.DefaultRepoServerName), fmt.Sprintf("Name of the Argo CD Repo server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvRepoServerName))
+	command.PersistentFlags().StringVar(&clientOpts.RedisCompression, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
 	command.PersistentFlags().BoolVar(&clientOpts.PromptsEnabled, "prompts-enabled", localconfig.GetPromptsEnabled(true), "Force optional interactive prompts to be enabled or disabled, overriding local configuration. If not specified, the local configuration value will be used, which is false by default.")

 	clientOpts.KubeOverrides = &clientcmd.ConfigOverrides{}
@@ -53,7 +53,7 @@ func (p *Prompt) ConfirmBaseOnCount(messageForSingle string, messageForArray str
 	}

 	if count == 1 {
-		return p.Confirm(messageForSingle), true
+		return p.Confirm(messageForSingle), false
 	}

 	return p.ConfirmAll(messageForArray)
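The second return value means "the user confirmed all remaining items"; for a single item there is nothing further to confirm, so it now returns false. A sketch of the corrected semantics under that reading (the full Prompt internals are not shown in this diff, so the early-return branch here is inferred from the tests below):

    // ConfirmBaseOnCount returns (confirmed, confirmedAll).
    func (p *Prompt) ConfirmBaseOnCount(messageForSingle, messageForArray string, count int) (bool, bool) {
    	if !p.enabled || count == 0 {
    		return true, true // nothing to ask, or prompts disabled
    	}
    	if count == 1 {
    		return p.Confirm(messageForSingle), false // no "all" for a single item
    	}
    	return p.ConfirmAll(messageForArray) // may answer "yes to all"
    }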
@@ -38,11 +38,47 @@ func TestConfirmBaseOnCountPromptDisabled(t *testing.T) {
 	assert.True(t, result2)
 }

-func TestConfirmBaseOnCountZeroApps(t *testing.T) {
-	p := &Prompt{enabled: true}
-	result1, result2 := p.ConfirmBaseOnCount("Proceed?", "Process all?", 0)
-	assert.True(t, result1)
-	assert.True(t, result2)
+func TestConfirmBaseOnCount(t *testing.T) {
+	tests := []struct {
+		input  string
+		output bool
+		count  int
+	}{
+		{
+			input:  "y\n",
+			output: true,
+			count:  0,
+		},
+		{
+			input:  "y\n",
+			output: true,
+			count:  1,
+		},
+		{
+			input:  "n\n",
+			output: false,
+			count:  1,
+		},
+	}
+
+	origStdin := os.Stdin
+
+	for _, tt := range tests {
+		tmpFile, err := writeToStdin(tt.input)
+		require.NoError(t, err)
+		p := &Prompt{enabled: true}
+		result1, result2 := p.ConfirmBaseOnCount("Proceed?", "Proceed all?", tt.count)
+		assert.Equal(t, tt.output, result1)
+		if tt.count == 1 {
+			assert.False(t, result2)
+		} else {
+			assert.Equal(t, tt.output, result2)
+		}
+		_ = tmpFile.Close()
+		os.Remove(tmpFile.Name())
+	}
+
+	os.Stdin = origStdin
 }

 func TestConfirmPrompt(t *testing.T) {
@@ -62,8 +98,8 @@ func TestConfirmPrompt(t *testing.T) {
 		p := &Prompt{enabled: true}
 		result := p.Confirm("Are you sure you want to run this command? (y/n) \n")
 		assert.Equal(t, c.output, result)
-		os.Remove(tmpFile.Name())
+		_ = tmpFile.Close()
+		os.Remove(tmpFile.Name())
 	}

 	os.Stdin = origStdin
@@ -89,8 +125,8 @@ func TestConfirmAllPrompt(t *testing.T) {
 		confirm, confirmAll := p.ConfirmAll("Are you sure you want to run this command? (y/n) \n")
 		assert.Equal(t, c.confirm, confirm)
 		assert.Equal(t, c.confirmAll, confirmAll)
-		os.Remove(tmpFile.Name())
+		_ = tmpFile.Close()
+		os.Remove(tmpFile.Name())
 	}

 	os.Stdin = origStdin
@@ -11,6 +11,7 @@ import (
 	appcontroller "github.com/argoproj/argo-cd/v2/cmd/argocd-application-controller/commands"
 	applicationset "github.com/argoproj/argo-cd/v2/cmd/argocd-applicationset-controller/commands"
 	cmpserver "github.com/argoproj/argo-cd/v2/cmd/argocd-cmp-server/commands"
+	commitserver "github.com/argoproj/argo-cd/v2/cmd/argocd-commit-server/commands"
 	dex "github.com/argoproj/argo-cd/v2/cmd/argocd-dex/commands"
 	gitaskpass "github.com/argoproj/argo-cd/v2/cmd/argocd-git-ask-pass/commands"
 	k8sauth "github.com/argoproj/argo-cd/v2/cmd/argocd-k8s-auth/commands"
@@ -46,6 +47,8 @@ func main() {
 	case "argocd-cmp-server":
 		command = cmpserver.NewCommand()
 		isCLI = true
+	case "argocd-commit-server":
+		command = commitserver.NewCommand()
 	case "argocd-dex":
 		command = dex.NewCommand()
 	case "argocd-notifications":
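This hunk registers the commit server in Argo CD's single-binary dispatch: the binary inspects the name it was invoked as and routes to the matching component. A minimal sketch of that pattern (the printed messages are illustrative, not the actual Argo CD wiring):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Argo CD ships one binary; symlinks or renamed copies such as
	// "argocd-commit-server" select which component to run.
	binaryName := filepath.Base(os.Args[0])
	switch binaryName {
	case "argocd-commit-server":
		fmt.Println("would start the commit server")
	case "argocd-repo-server":
		fmt.Println("would start the repo server")
	default:
		fmt.Println("would start the CLI")
	}
}
```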
@@ -91,6 +91,12 @@ type AppOptions struct {
 	retryBackoffFactor int64
 	ref                string
 	SourceName         string
+	drySourceRepo      string
+	drySourceRevision  string
+	drySourcePath      string
+	syncSourceBranch   string
+	syncSourcePath     string
+	hydrateToBranch    string
 }

 // SetAutoMaxProcs sets the GOMAXPROCS value based on the binary name.
@@ -112,6 +118,12 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
 	command.Flags().StringVar(&opts.chart, "helm-chart", "", "Helm Chart name")
 	command.Flags().StringVar(&opts.env, "env", "", "Application environment to monitor")
 	command.Flags().StringVar(&opts.revision, "revision", "", "The tracking source branch, tag, commit or Helm chart version the application will sync to")
+	command.Flags().StringVar(&opts.drySourceRepo, "dry-source-repo", "", "Repository URL of the app dry source")
+	command.Flags().StringVar(&opts.drySourceRevision, "dry-source-revision", "", "Revision of the app dry source")
+	command.Flags().StringVar(&opts.drySourcePath, "dry-source-path", "", "Path in repository to the app directory for the dry source")
+	command.Flags().StringVar(&opts.syncSourceBranch, "sync-source-branch", "", "The branch from which the app will sync")
+	command.Flags().StringVar(&opts.syncSourcePath, "sync-source-path", "", "The path in the repository from which the app will sync")
+	command.Flags().StringVar(&opts.hydrateToBranch, "hydrate-to-branch", "", "The branch to hydrate the app to")
 	command.Flags().IntVar(&opts.revisionHistoryLimit, "revision-history-limit", argoappv1.RevisionHistoryLimit, "How many items to keep in revision history")
 	command.Flags().StringVar(&opts.destServer, "dest-server", "", "K8s cluster URL (e.g. https://kubernetes.default.svc)")
 	command.Flags().StringVar(&opts.destName, "dest-name", "", "K8s cluster Name (e.g. minikube)")
@@ -175,21 +187,27 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
 	if flags == nil {
 		return visited
 	}
-	source := spec.GetSourcePtrByPosition(sourcePosition)
-	if source == nil {
-		source = &argoappv1.ApplicationSource{}
-	}
-	source, visited = ConstructSource(source, *appOpts, flags)
-	if spec.HasMultipleSources() {
-		if sourcePosition == 0 {
-			spec.Sources[sourcePosition] = *source
-		} else if sourcePosition > 0 {
-			spec.Sources[sourcePosition-1] = *source
-		} else {
-			spec.Sources = append(spec.Sources, *source)
-		}
+	var h *argoappv1.SourceHydrator
+	h, hasHydratorFlag := constructSourceHydrator(spec.SourceHydrator, *appOpts, flags)
+	if hasHydratorFlag {
+		spec.SourceHydrator = h
 	} else {
-		spec.Source = source
+		source := spec.GetSourcePtrByPosition(sourcePosition)
+		if source == nil {
+			source = &argoappv1.ApplicationSource{}
+		}
+		source, visited = ConstructSource(source, *appOpts, flags)
+		if spec.HasMultipleSources() {
+			if sourcePosition == 0 {
+				spec.Sources[sourcePosition] = *source
+			} else if sourcePosition > 0 {
+				spec.Sources[sourcePosition-1] = *source
+			} else {
+				spec.Sources = append(spec.Sources, *source)
+			}
+		} else {
+			spec.Source = source
+		}
 	}
 	flags.Visit(func(f *pflag.Flag) {
 		visited++
@@ -592,9 +610,7 @@ func constructAppsBaseOnName(appName string, labels, annotations, args []string,
 			Name:      appName,
 			Namespace: appNs,
 		},
-		Spec: argoappv1.ApplicationSpec{
-			Source: &argoappv1.ApplicationSource{},
-		},
+		Spec: argoappv1.ApplicationSpec{},
 	}
 	SetAppSpecOptions(flags, &app.Spec, &appOpts, 0)
 	SetParameterOverrides(app, appOpts.Parameters, 0)
@@ -768,6 +784,47 @@ func ConstructSource(source *argoappv1.ApplicationSource, appOpts AppOptions, fl
 	return source, visited
 }

+// constructSourceHydrator constructs a source hydrator from the command line flags. It returns the modified source
+// hydrator and a boolean indicating if any hydrator flags were set. We return instead of just modifying the source
+// hydrator in place because the given hydrator `h` might be nil. In that case, we need to create a new source hydrator
+// and return it.
+func constructSourceHydrator(h *argoappv1.SourceHydrator, appOpts AppOptions, flags *pflag.FlagSet) (*argoappv1.SourceHydrator, bool) {
+	hasHydratorFlag := false
+	ensureNotNil := func(notEmpty bool) {
+		hasHydratorFlag = true
+		if notEmpty && h == nil {
+			h = &argoappv1.SourceHydrator{}
+		}
+	}
+	flags.Visit(func(f *pflag.Flag) {
+		switch f.Name {
+		case "dry-source-repo":
+			ensureNotNil(appOpts.drySourceRepo != "")
+			h.DrySource.RepoURL = appOpts.drySourceRepo
+		case "dry-source-path":
+			ensureNotNil(appOpts.drySourcePath != "")
+			h.DrySource.Path = appOpts.drySourcePath
+		case "dry-source-revision":
+			ensureNotNil(appOpts.drySourceRevision != "")
+			h.DrySource.TargetRevision = appOpts.drySourceRevision
+		case "sync-source-branch":
+			ensureNotNil(appOpts.syncSourceBranch != "")
+			h.SyncSource.TargetBranch = appOpts.syncSourceBranch
+		case "sync-source-path":
+			ensureNotNil(appOpts.syncSourcePath != "")
+			h.SyncSource.Path = appOpts.syncSourcePath
+		case "hydrate-to-branch":
+			ensureNotNil(appOpts.hydrateToBranch != "")
+			if appOpts.hydrateToBranch == "" {
+				h.HydrateTo = nil
+			} else {
+				h.HydrateTo = &argoappv1.HydrateTo{TargetBranch: appOpts.hydrateToBranch}
+			}
+		}
+	})
+	return h, hasHydratorFlag
+}
+
 func mergeLabels(app *argoappv1.Application, labels []string) {
 	mapLabels, err := label.Parse(labels)
 	errors.CheckError(err)
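The doc comment explains why constructSourceHydrator returns the hydrator rather than mutating it in place: the incoming pointer may be nil, and an allocation made inside the function is invisible to the caller unless it is returned. A minimal, self-contained sketch of that pattern (the hydrator type and applyFlag helper are stand-ins, not the Argo CD types):

```go
package main

import "fmt"

type hydrator struct{ repoURL string }

// applyFlag returns the (possibly newly allocated) hydrator and whether any
// flag was seen, mirroring the return-instead-of-mutate contract above.
func applyFlag(h *hydrator, value string) (*hydrator, bool) {
	seen := false
	ensure := func(notEmpty bool) {
		seen = true
		if notEmpty && h == nil {
			h = &hydrator{} // caller only sees this if we return h
		}
	}
	ensure(value != "")
	if value != "" {
		h.repoURL = value
	}
	return h, seen
}

func main() {
	var h *hydrator // nil, as spec.SourceHydrator may be
	h, ok := applyFlag(h, "https://example.com/repo.git")
	fmt.Println(h.repoURL, ok) // https://example.com/repo.git true
}
```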
@@ -295,6 +295,28 @@ func Test_setAppSpecOptions(t *testing.T) {
 		require.NoError(t, f.SetFlag("helm-api-versions", "v2"))
 		assert.Equal(t, []string{"v1", "v2"}, f.spec.Source.Helm.APIVersions)
 	})
+	t.Run("source hydrator", func(t *testing.T) {
+		require.NoError(t, f.SetFlag("dry-source-repo", "https://github.com/argoproj/argocd-example-apps"))
+		assert.Equal(t, "https://github.com/argoproj/argocd-example-apps", f.spec.SourceHydrator.DrySource.RepoURL)
+
+		require.NoError(t, f.SetFlag("dry-source-path", "apps"))
+		assert.Equal(t, "apps", f.spec.SourceHydrator.DrySource.Path)
+
+		require.NoError(t, f.SetFlag("dry-source-revision", "HEAD"))
+		assert.Equal(t, "HEAD", f.spec.SourceHydrator.DrySource.TargetRevision)
+
+		require.NoError(t, f.SetFlag("sync-source-branch", "env/test"))
+		assert.Equal(t, "env/test", f.spec.SourceHydrator.SyncSource.TargetBranch)
+
+		require.NoError(t, f.SetFlag("sync-source-path", "apps"))
+		assert.Equal(t, "apps", f.spec.SourceHydrator.SyncSource.Path)
+
+		require.NoError(t, f.SetFlag("hydrate-to-branch", "env/test-next"))
+		assert.Equal(t, "env/test-next", f.spec.SourceHydrator.HydrateTo.TargetBranch)
+
+		require.NoError(t, f.SetFlag("hydrate-to-branch", ""))
+		assert.Nil(t, f.spec.SourceHydrator.HydrateTo)
+	})
 }

 func newMultiSourceAppOptionsFixture() *appOptionsFixture {
@@ -162,7 +162,7 @@ func TestGetKubePublicEndpoint(t *testing.T) {
 			if tc.clusterInfo != nil {
 				objects = append(objects, tc.clusterInfo)
 			}
-			clientset := fake.NewSimpleClientset(objects...)
+			clientset := fake.NewClientset(objects...)
 			endpoint, err := GetKubePublicEndpoint(clientset)
 			if tc.expectError {
 				require.Error(t, err)
@@ -18,11 +18,13 @@ func TestProjectOpts_ResourceLists(t *testing.T) {
 	}

 	assert.ElementsMatch(t,
-		[]v1.GroupKind{{Kind: "ConfigMap"}}, opts.GetAllowedNamespacedResources(),
-		[]v1.GroupKind{{Group: "apps", Kind: "DaemonSet"}}, opts.GetDeniedNamespacedResources(),
-		[]v1.GroupKind{{Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}}, opts.GetAllowedClusterResources(),
-		[]v1.GroupKind{{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}}, opts.GetDeniedClusterResources(),
-	)
+		[]v1.GroupKind{{Kind: "ConfigMap"}}, opts.GetAllowedNamespacedResources())
+	assert.ElementsMatch(t,
+		[]v1.GroupKind{{Group: "apps", Kind: "DaemonSet"}}, opts.GetDeniedNamespacedResources())
+	assert.ElementsMatch(t,
+		[]v1.GroupKind{{Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}}, opts.GetAllowedClusterResources())
+	assert.ElementsMatch(t,
+		[]v1.GroupKind{{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}}, opts.GetDeniedClusterResources())
 }

 func TestProjectOpts_GetDestinationServiceAccounts(t *testing.T) {
@@ -45,7 +45,7 @@ func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
 	command.Flags().StringVar(&opts.GithubAppPrivateKeyPath, "github-app-private-key-path", "", "private key of the GitHub Application")
 	command.Flags().StringVar(&opts.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3")
 	command.Flags().StringVar(&opts.Proxy, "proxy", "", "use proxy to access repository")
-	command.Flags().StringVar(&opts.Proxy, "no-proxy", "", "don't access these targets via proxy")
+	command.Flags().StringVar(&opts.NoProxy, "no-proxy", "", "don't access these targets via proxy")
 	command.Flags().StringVar(&opts.GCPServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
 	command.Flags().BoolVar(&opts.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force use of basic auth when connecting repository via HTTP")
 }
commitserver/apiclient/clientset.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package apiclient

import (
	"fmt"

	log "github.com/sirupsen/logrus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/argoproj/argo-cd/v2/util/io"
)

// Clientset represents commit server api clients
type Clientset interface {
	NewCommitServerClient() (io.Closer, CommitServiceClient, error)
}

type clientSet struct {
	address string
}

// NewCommitServerClient creates a new instance of the commit server client
func (c *clientSet) NewCommitServerClient() (io.Closer, CommitServiceClient, error) {
	conn, err := NewConnection(c.address)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to open a new connection to commit server: %w", err)
	}
	return conn, NewCommitServiceClient(conn), nil
}

// NewConnection creates a new connection to the commit server
func NewConnection(address string) (*grpc.ClientConn, error) {
	var opts []grpc.DialOption
	opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))

	// TODO: switch to grpc.NewClient.
	// nolint:staticcheck
	conn, err := grpc.Dial(address, opts...)
	if err != nil {
		log.Errorf("Unable to connect to commit service with address %s", address)
		return nil, err
	}
	return conn, nil
}

// NewCommitServerClientset creates a new instance of the commit server Clientset
func NewCommitServerClientset(address string) Clientset {
	return &clientSet{address: address}
}
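Pulling these pieces together, a caller constructs the clientset, opens a connection, and issues a commit request over gRPC. A minimal sketch, assuming the default in-cluster address and that util/io's Close helper behaves as a checked close (the request values are illustrative; note the connection above is explicitly insecure):

```go
package main

import (
	"context"
	"log"

	"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	utilio "github.com/argoproj/argo-cd/v2/util/io"
)

func main() {
	clientset := apiclient.NewCommitServerClientset("argocd-commit-server:8086")
	closer, client, err := clientset.NewCommitServerClient()
	if err != nil {
		log.Fatalf("failed to create commit server client: %v", err)
	}
	defer utilio.Close(closer)

	// Field names mirror commit.proto below; values are hypothetical.
	resp, err := client.CommitHydratedManifests(context.Background(), &apiclient.CommitHydratedManifestsRequest{
		Repo:         &v1alpha1.Repository{Repo: "https://github.com/example/deployments.git"},
		SyncBranch:   "env/test",
		TargetBranch: "main",
		DrySha:       "abc123",
	})
	if err != nil {
		log.Fatalf("commit request failed: %v", err)
	}
	log.Printf("hydrated SHA: %s", resp.HydratedSha)
}
```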
commitserver/apiclient/commit.pb.go (generated, new file, 1382 lines)
File diff suppressed because it is too large.
commitserver/apiclient/mocks/Clientset.go (generated, new file, 68 lines)
@@ -0,0 +1,68 @@
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

import (
	apiclient "github.com/argoproj/argo-cd/v2/commitserver/apiclient"
	io "github.com/argoproj/argo-cd/v2/util/io"

	mock "github.com/stretchr/testify/mock"
)

// Clientset is an autogenerated mock type for the Clientset type
type Clientset struct {
	mock.Mock
}

// NewCommitServerClient provides a mock function with no fields
func (_m *Clientset) NewCommitServerClient() (io.Closer, apiclient.CommitServiceClient, error) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for NewCommitServerClient")
	}

	var r0 io.Closer
	var r1 apiclient.CommitServiceClient
	var r2 error
	if rf, ok := ret.Get(0).(func() (io.Closer, apiclient.CommitServiceClient, error)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() io.Closer); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(io.Closer)
		}
	}

	if rf, ok := ret.Get(1).(func() apiclient.CommitServiceClient); ok {
		r1 = rf()
	} else {
		if ret.Get(1) != nil {
			r1 = ret.Get(1).(apiclient.CommitServiceClient)
		}
	}

	if rf, ok := ret.Get(2).(func() error); ok {
		r2 = rf()
	} else {
		r2 = ret.Error(2)
	}

	return r0, r1, r2
}

// NewClientset creates a new instance of Clientset. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewClientset(t interface {
	mock.TestingT
	Cleanup(func())
}) *Clientset {
	mock := &Clientset{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
commitserver/apiclient/mocks/CommitServiceClient.go (generated, new file, 69 lines)
@@ -0,0 +1,69 @@
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

import (
	context "context"

	apiclient "github.com/argoproj/argo-cd/v2/commitserver/apiclient"

	grpc "google.golang.org/grpc"

	mock "github.com/stretchr/testify/mock"
)

// CommitServiceClient is an autogenerated mock type for the CommitServiceClient type
type CommitServiceClient struct {
	mock.Mock
}

// CommitHydratedManifests provides a mock function with given fields: ctx, in, opts
func (_m *CommitServiceClient) CommitHydratedManifests(ctx context.Context, in *apiclient.CommitHydratedManifestsRequest, opts ...grpc.CallOption) (*apiclient.CommitHydratedManifestsResponse, error) {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, in)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for CommitHydratedManifests")
	}

	var r0 *apiclient.CommitHydratedManifestsResponse
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, *apiclient.CommitHydratedManifestsRequest, ...grpc.CallOption) (*apiclient.CommitHydratedManifestsResponse, error)); ok {
		return rf(ctx, in, opts...)
	}
	if rf, ok := ret.Get(0).(func(context.Context, *apiclient.CommitHydratedManifestsRequest, ...grpc.CallOption) *apiclient.CommitHydratedManifestsResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*apiclient.CommitHydratedManifestsResponse)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, *apiclient.CommitHydratedManifestsRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewCommitServiceClient creates a new instance of CommitServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewCommitServiceClient(t interface {
	mock.TestingT
	Cleanup(func())
}) *CommitServiceClient {
	mock := &CommitServiceClient{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
commitserver/commit/commit.go (new file, 225 lines)
@@ -0,0 +1,225 @@
package commit

import (
	"context"
	"fmt"
	"os"
	"time"

	log "github.com/sirupsen/logrus"

	"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
	"github.com/argoproj/argo-cd/v2/commitserver/metrics"
	"github.com/argoproj/argo-cd/v2/util/git"
	"github.com/argoproj/argo-cd/v2/util/io/files"
)

// Service is the service that handles commit requests.
type Service struct {
	gitCredsStore     git.CredsStore
	metricsServer     *metrics.Server
	repoClientFactory RepoClientFactory
}

// NewService returns a new instance of the commit service.
func NewService(gitCredsStore git.CredsStore, metricsServer *metrics.Server) *Service {
	return &Service{
		gitCredsStore:     gitCredsStore,
		metricsServer:     metricsServer,
		repoClientFactory: NewRepoClientFactory(gitCredsStore, metricsServer),
	}
}

// CommitHydratedManifests handles a commit request. It clones the repository, checks out the sync branch, checks out
// the target branch, clears the repository contents, writes the manifests to the repository, commits the changes, and
// pushes the changes. It returns the hydrated revision SHA and an error if one occurred.
func (s *Service) CommitHydratedManifests(ctx context.Context, r *apiclient.CommitHydratedManifestsRequest) (*apiclient.CommitHydratedManifestsResponse, error) {
	// This method is intentionally short. It's a wrapper around handleCommitRequest that adds metrics and logging.
	// Keep logic here minimal and put most of the logic in handleCommitRequest.
	startTime := time.Now()

	// We validate for a nil repo in handleCommitRequest, but we need to check for a nil repo here to get the repo URL
	// for metrics.
	var repoURL string
	if r.Repo != nil {
		repoURL = r.Repo.Repo
	}

	var err error
	s.metricsServer.IncPendingCommitRequest(repoURL)
	defer func() {
		s.metricsServer.DecPendingCommitRequest(repoURL)
		commitResponseType := metrics.CommitResponseTypeSuccess
		if err != nil {
			commitResponseType = metrics.CommitResponseTypeFailure
		}
		s.metricsServer.IncCommitRequest(repoURL, commitResponseType)
		s.metricsServer.ObserveCommitRequestDuration(repoURL, commitResponseType, time.Since(startTime))
	}()

	logCtx := log.WithFields(log.Fields{"branch": r.TargetBranch, "drySHA": r.DrySha})

	out, sha, err := s.handleCommitRequest(logCtx, r)
	if err != nil {
		logCtx.WithError(err).WithField("output", out).Error("failed to handle commit request")

		// No need to wrap this error, sufficient context is built in handleCommitRequest.
		return &apiclient.CommitHydratedManifestsResponse{}, err
	}

	logCtx.Info("Successfully handled commit request")
	return &apiclient.CommitHydratedManifestsResponse{
		HydratedSha: sha,
	}, nil
}

// handleCommitRequest handles the commit request. It clones the repository, checks out the sync branch, checks out the
// target branch, clears the repository contents, writes the manifests to the repository, commits the changes, and pushes
// the changes. It returns the output of the git commands and an error if one occurred.
func (s *Service) handleCommitRequest(logCtx *log.Entry, r *apiclient.CommitHydratedManifestsRequest) (string, string, error) {
	if r.Repo == nil {
		return "", "", fmt.Errorf("repo is required")
	}
	if r.Repo.Repo == "" {
		return "", "", fmt.Errorf("repo URL is required")
	}
	if r.TargetBranch == "" {
		return "", "", fmt.Errorf("target branch is required")
	}
	if r.SyncBranch == "" {
		return "", "", fmt.Errorf("sync branch is required")
	}

	logCtx = logCtx.WithField("repo", r.Repo.Repo)
	logCtx.Debug("Initiating git client")
	gitClient, dirPath, cleanup, err := s.initGitClient(logCtx, r)
	if err != nil {
		return "", "", fmt.Errorf("failed to init git client: %w", err)
	}
	defer cleanup()

	logCtx.Debugf("Checking out sync branch %s", r.SyncBranch)
	var out string
	out, err = gitClient.CheckoutOrOrphan(r.SyncBranch, false)
	if err != nil {
		return out, "", fmt.Errorf("failed to checkout sync branch: %w", err)
	}

	logCtx.Debugf("Checking out target branch %s", r.TargetBranch)
	out, err = gitClient.CheckoutOrNew(r.TargetBranch, r.SyncBranch, false)
	if err != nil {
		return out, "", fmt.Errorf("failed to checkout target branch: %w", err)
	}

	logCtx.Debug("Clearing repo contents")
	out, err = gitClient.RemoveContents()
	if err != nil {
		return out, "", fmt.Errorf("failed to clear repo: %w", err)
	}

	logCtx.Debug("Writing manifests")
	err = WriteForPaths(dirPath, r.Repo.Repo, r.DrySha, r.Paths)
	if err != nil {
		return "", "", fmt.Errorf("failed to write manifests: %w", err)
	}

	logCtx.Debug("Committing and pushing changes")
	out, err = gitClient.CommitAndPush(r.TargetBranch, r.CommitMessage)
	if err != nil {
		return out, "", fmt.Errorf("failed to commit and push: %w", err)
	}

	logCtx.Debug("Getting commit SHA")
	sha, err := gitClient.CommitSHA()
	if err != nil {
		return "", "", fmt.Errorf("failed to get commit SHA: %w", err)
	}

	return "", sha, nil
}

// initGitClient initializes a git client for the given repository and returns the client, the path to the directory where
// the repository is cloned, a cleanup function that should be called when the directory is no longer needed, and an error
// if one occurred.
func (s *Service) initGitClient(logCtx *log.Entry, r *apiclient.CommitHydratedManifestsRequest) (git.Client, string, func(), error) {
	dirPath, err := files.CreateTempDir("/tmp/_commit-service")
	if err != nil {
		return nil, "", nil, fmt.Errorf("failed to create temp dir: %w", err)
	}
	// Call cleanupOrLog in this function if an error occurs to ensure the temp dir is cleaned up.
	cleanupOrLog := func() {
		err := os.RemoveAll(dirPath)
		if err != nil {
			logCtx.WithError(err).Error("failed to cleanup temp dir")
		}
	}

	gitClient, err := s.repoClientFactory.NewClient(r.Repo, dirPath)
	if err != nil {
		cleanupOrLog()
		return nil, "", nil, fmt.Errorf("failed to create git client: %w", err)
	}

	logCtx.Debugf("Initializing repo %s", r.Repo.Repo)
	err = gitClient.Init()
	if err != nil {
		cleanupOrLog()
		return nil, "", nil, fmt.Errorf("failed to init git client: %w", err)
	}

	logCtx.Debugf("Fetching repo %s", r.Repo.Repo)
	err = gitClient.Fetch("")
	if err != nil {
		cleanupOrLog()
		return nil, "", nil, fmt.Errorf("failed to clone repo: %w", err)
	}

	// FIXME: make it work for GHE
	//logCtx.Debugf("Getting user info for repo credentials")
	//gitCreds := r.Repo.GetGitCreds(s.gitCredsStore)
	//startTime := time.Now()
	//authorName, authorEmail, err := gitCreds.GetUserInfo(ctx)
	//s.metricsServer.ObserveUserInfoRequestDuration(r.Repo.Repo, getCredentialType(r.Repo), time.Since(startTime))
	//if err != nil {
	//	cleanupOrLog()
	//	return nil, "", nil, fmt.Errorf("failed to get github app info: %w", err)
	//}
	var authorName, authorEmail string

	if authorName == "" {
		authorName = "Argo CD"
	}
	if authorEmail == "" {
		logCtx.Warnf("Author email not available, using 'argo-cd@example.com'.")
		authorEmail = "argo-cd@example.com"
	}

	logCtx.Debugf("Setting author %s <%s>", authorName, authorEmail)
	_, err = gitClient.SetAuthor(authorName, authorEmail)
	if err != nil {
		cleanupOrLog()
		return nil, "", nil, fmt.Errorf("failed to set author: %w", err)
	}

	return gitClient, dirPath, cleanupOrLog, nil
}

type hydratorMetadataFile struct {
	RepoURL  string   `json:"repoURL"`
	DrySHA   string   `json:"drySha"`
	Commands []string `json:"commands"`
}

// TODO: make this configurable via ConfigMap.
var manifestHydrationReadmeTemplate = `
# Manifest Hydration

To hydrate the manifests in this repository, run the following commands:

` + "```shell\n" + `
git clone {{ .RepoURL }}
# cd into the cloned directory
git checkout {{ .DrySHA }}
{{ range $command := .Commands -}}
{{ $command }}
{{ end -}}` + "```"
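For reference, the template above renders a README telling a human how to reproduce the hydration. A minimal sketch of the rendering step (the template is abridged here to omit the shell code fence, and the metadata values are hypothetical):

```go
package main

import (
	"os"
	"text/template"
)

type hydratorMetadataFile struct {
	RepoURL  string   `json:"repoURL"`
	DrySHA   string   `json:"drySha"`
	Commands []string `json:"commands"`
}

// Abridged version of manifestHydrationReadmeTemplate above.
const readmeTemplate = `# Manifest Hydration

To hydrate the manifests in this repository, run the following commands:

git clone {{ .RepoURL }}
# cd into the cloned directory
git checkout {{ .DrySHA }}
{{ range $command := .Commands -}}
{{ $command }}
{{ end -}}`

func main() {
	tmpl := template.Must(template.New("readme").Parse(readmeTemplate))
	metadata := hydratorMetadataFile{
		RepoURL:  "https://github.com/example/repo", // hypothetical
		DrySHA:   "abc123",                          // hypothetical
		Commands: []string{"kustomize build . > manifest.yaml"},
	}
	// Writes the rendered README to stdout; writeReadme above writes it to
	// README.md inside the hydrated path instead.
	_ = tmpl.Execute(os.Stdout, metadata)
}
```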
commitserver/commit/commit.proto (new file, 50 lines)
@@ -0,0 +1,50 @@
syntax = "proto3";
option go_package = "github.com/argoproj/argo-cd/v2/commitserver/apiclient";

import "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto";

// CommitHydratedManifestsRequest is the request to commit hydrated manifests to a repository.
message CommitHydratedManifestsRequest {
  // Repo contains repository information including, at minimum, the URL of the repository. Generally it will contain
  // repo credentials.
  github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.Repository repo = 1;
  // SyncBranch is the branch Argo CD syncs from, i.e. the hydrated branch.
  string syncBranch = 2;
  // TargetBranch is the branch Argo CD is committing to, i.e. the branch that will be updated.
  string targetBranch = 3;
  // DrySha is the commit SHA from the dry branch, i.e. the pre-rendered manifest branch.
  string drySha = 4;
  // CommitMessage is the commit message to use when committing changes.
  string commitMessage = 5;
  // Paths contains the paths to write hydrated manifests to, along with the manifests and commands to execute.
  repeated PathDetails paths = 6;
}

// PathDetails holds information about hydrated manifests to be written to a particular path in the hydrated manifests
// commit.
message PathDetails {
  // Path is the path to write the hydrated manifests to.
  string path = 1;
  // Manifests contains the manifests to write to the path.
  repeated HydratedManifestDetails manifests = 2;
  // Commands contains the commands executed when hydrating the manifests.
  repeated string commands = 3;
}

// HydratedManifestDetails contains the hydrated manifests.
message HydratedManifestDetails {
  // ManifestJSON is the hydrated manifest as JSON.
  string manifestJSON = 1;
}

// CommitHydratedManifestsResponse is the response to the CommitHydratedManifestsRequest.
message CommitHydratedManifestsResponse {
  // HydratedSha is the commit SHA of the hydrated manifests commit.
  string hydratedSha = 1;
}

// CommitService is the service for committing hydrated manifests to a repository.
service CommitService {
  // CommitHydratedManifests commits hydrated manifests to a repository.
  rpc CommitHydratedManifests (CommitHydratedManifestsRequest) returns (CommitHydratedManifestsResponse);
}
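The generated Go types mirror this schema one-to-one. A sketch of building a request with a single path, using only fields that appear in the proto above (all values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	req := &apiclient.CommitHydratedManifestsRequest{
		Repo:          &v1alpha1.Repository{Repo: "https://github.com/example/deployments.git"},
		SyncBranch:    "env/test",
		TargetBranch:  "main",
		DrySha:        "abc123",
		CommitMessage: "chore: hydrate manifests for abc123",
		Paths: []*apiclient.PathDetails{
			{
				Path: "apps/guestbook",
				Manifests: []*apiclient.HydratedManifestDetails{
					{ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap"}`},
				},
				Commands: []string{"kustomize build ."},
			},
		},
	}
	fmt.Println(req.TargetBranch)
}
```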
commitserver/commit/commit_test.go (new file, 125 lines)
@@ -0,0 +1,125 @@
package commit

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
	"github.com/argoproj/argo-cd/v2/commitserver/commit/mocks"
	"github.com/argoproj/argo-cd/v2/commitserver/metrics"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/util/git"
	gitmocks "github.com/argoproj/argo-cd/v2/util/git/mocks"
)

func Test_CommitHydratedManifests(t *testing.T) {
	t.Parallel()

	validRequest := &apiclient.CommitHydratedManifestsRequest{
		Repo: &v1alpha1.Repository{
			Repo: "https://github.com/argoproj/argocd-example-apps.git",
		},
		TargetBranch:  "main",
		SyncBranch:    "env/test",
		CommitMessage: "test commit message",
	}

	t.Run("missing repo", func(t *testing.T) {
		t.Parallel()

		service, _ := newServiceWithMocks(t)
		request := &apiclient.CommitHydratedManifestsRequest{}
		_, err := service.CommitHydratedManifests(context.Background(), request)
		require.Error(t, err)
		assert.ErrorContains(t, err, "repo is required")
	})

	t.Run("missing repo URL", func(t *testing.T) {
		t.Parallel()

		service, _ := newServiceWithMocks(t)
		request := &apiclient.CommitHydratedManifestsRequest{
			Repo: &v1alpha1.Repository{},
		}
		_, err := service.CommitHydratedManifests(context.Background(), request)
		require.Error(t, err)
		assert.ErrorContains(t, err, "repo URL is required")
	})

	t.Run("missing target branch", func(t *testing.T) {
		t.Parallel()

		service, _ := newServiceWithMocks(t)
		request := &apiclient.CommitHydratedManifestsRequest{
			Repo: &v1alpha1.Repository{
				Repo: "https://github.com/argoproj/argocd-example-apps.git",
			},
		}
		_, err := service.CommitHydratedManifests(context.Background(), request)
		require.Error(t, err)
		assert.ErrorContains(t, err, "target branch is required")
	})

	t.Run("missing sync branch", func(t *testing.T) {
		t.Parallel()

		service, _ := newServiceWithMocks(t)
		request := &apiclient.CommitHydratedManifestsRequest{
			Repo: &v1alpha1.Repository{
				Repo: "https://github.com/argoproj/argocd-example-apps.git",
			},
			TargetBranch: "main",
		}
		_, err := service.CommitHydratedManifests(context.Background(), request)
		require.Error(t, err)
		assert.ErrorContains(t, err, "sync branch is required")
	})

	t.Run("failed to create git client", func(t *testing.T) {
		t.Parallel()

		service, mockRepoClientFactory := newServiceWithMocks(t)
		mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()

		_, err := service.CommitHydratedManifests(context.Background(), validRequest)
		require.Error(t, err)
		assert.ErrorIs(t, err, assert.AnError)
	})

	t.Run("happy path", func(t *testing.T) {
		t.Parallel()

		service, mockRepoClientFactory := newServiceWithMocks(t)
		mockGitClient := gitmocks.NewClient(t)
		mockGitClient.On("Init").Return(nil).Once()
		mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
		mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
		mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
		mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
		mockGitClient.On("RemoveContents").Return("", nil).Once()
		mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
		mockGitClient.On("CommitSHA").Return("it-worked!", nil).Once()
		mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()

		resp, err := service.CommitHydratedManifests(context.Background(), validRequest)
		require.NoError(t, err)
		require.NotNil(t, resp)
		assert.Equal(t, "it-worked!", resp.HydratedSha)
	})
}

func newServiceWithMocks(t *testing.T) (*Service, *mocks.RepoClientFactory) {
	t.Helper()

	metricsServer := metrics.NewMetricsServer()
	mockCredsStore := git.NoopCredsStore{}
	service := NewService(mockCredsStore, metricsServer)
	mockRepoClientFactory := mocks.NewRepoClientFactory(t)
	service.repoClientFactory = mockRepoClientFactory

	return service, mockRepoClientFactory
}
commitserver/commit/credentialtypehelper.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package commit

import "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

// getCredentialType returns the type of credential used by the repository.
func getCredentialType(repo *v1alpha1.Repository) string {
	if repo == nil {
		return ""
	}
	if repo.Password != "" {
		return "https"
	}
	if repo.SSHPrivateKey != "" {
		return "ssh"
	}
	if repo.GithubAppPrivateKey != "" && repo.GithubAppId != 0 && repo.GithubAppInstallationId != 0 {
		return "github-app"
	}
	if repo.GCPServiceAccountKey != "" {
		return "cloud-source-repositories"
	}
	return ""
}
commitserver/commit/credentialtypehelper_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package commit

import (
	"testing"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func TestRepository_GetCredentialType(t *testing.T) {
	tests := []struct {
		name string
		repo *v1alpha1.Repository
		want string
	}{
		{
			name: "Empty Repository",
			repo: nil,
			want: "",
		},
		{
			name: "HTTPS Repository",
			repo: &v1alpha1.Repository{
				Repo:     "foo",
				Password: "some-password",
			},
			want: "https",
		},
		{
			name: "SSH Repository",
			repo: &v1alpha1.Repository{
				Repo:          "foo",
				SSHPrivateKey: "some-key",
			},
			want: "ssh",
		},
		{
			name: "GitHub App Repository",
			repo: &v1alpha1.Repository{
				Repo:                    "foo",
				GithubAppPrivateKey:     "some-key",
				GithubAppId:             1,
				GithubAppInstallationId: 1,
			},
			want: "github-app",
		},
		{
			name: "Google Cloud Repository",
			repo: &v1alpha1.Repository{
				Repo:                 "foo",
				GCPServiceAccountKey: "some-key",
			},
			want: "cloud-source-repositories",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := getCredentialType(tt.repo); got != tt.want {
				t.Errorf("Repository.GetCredentialType() = %v, want %v", got, tt.want)
			}
		})
	}
}
commitserver/commit/hydratorhelper.go (new file, 145 lines)
@@ -0,0 +1,145 @@
package commit

import (
	"encoding/json"
	"fmt"
	"os"
	"path"
	"text/template"

	log "github.com/sirupsen/logrus"
	"gopkg.in/yaml.v3"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
	"github.com/argoproj/argo-cd/v2/util/io/files"
)

// WriteForPaths writes the manifests, hydrator.metadata, and README.md files for each path in the provided paths. It
// also writes a root-level hydrator.metadata file containing the repo URL and dry SHA.
func WriteForPaths(rootPath string, repoUrl string, drySha string, paths []*apiclient.PathDetails) error {
	// Write the top-level metadata.
	err := writeMetadata(rootPath, hydratorMetadataFile{DrySHA: drySha, RepoURL: repoUrl})
	if err != nil {
		return fmt.Errorf("failed to write top-level hydrator metadata: %w", err)
	}

	for _, p := range paths {
		hydratePath := p.Path
		if hydratePath == "." {
			hydratePath = ""
		}

		var fullHydratePath string
		fullHydratePath, err = files.SecureMkdirAll(rootPath, hydratePath, os.ModePerm)
		if err != nil {
			return fmt.Errorf("failed to create path: %w", err)
		}

		// Write the manifests
		err = writeManifests(fullHydratePath, p.Manifests)
		if err != nil {
			return fmt.Errorf("failed to write manifests: %w", err)
		}

		// Write hydrator.metadata containing information about the hydration process.
		hydratorMetadata := hydratorMetadataFile{
			Commands: p.Commands,
			DrySHA:   drySha,
			RepoURL:  repoUrl,
		}
		err = writeMetadata(fullHydratePath, hydratorMetadata)
		if err != nil {
			return fmt.Errorf("failed to write hydrator metadata: %w", err)
		}

		// Write README
		err = writeReadme(fullHydratePath, hydratorMetadata)
		if err != nil {
			return fmt.Errorf("failed to write readme: %w", err)
		}
	}
	return nil
}

// writeMetadata writes the metadata to the hydrator.metadata file.
func writeMetadata(dirPath string, metadata hydratorMetadataFile) error {
	hydratorMetadataJson, err := json.MarshalIndent(metadata, "", "  ")
	if err != nil {
		return fmt.Errorf("failed to marshal hydrator metadata: %w", err)
	}
	// No need to use SecureJoin here, as the path is already sanitized.
	hydratorMetadataPath := path.Join(dirPath, "hydrator.metadata")
	err = os.WriteFile(hydratorMetadataPath, hydratorMetadataJson, os.ModePerm)
	if err != nil {
		return fmt.Errorf("failed to write hydrator metadata: %w", err)
	}
	return nil
}

// writeReadme writes the readme to the README.md file.
func writeReadme(dirPath string, metadata hydratorMetadataFile) error {
	readmeTemplate := template.New("readme")
	readmeTemplate, err := readmeTemplate.Parse(manifestHydrationReadmeTemplate)
	if err != nil {
		return fmt.Errorf("failed to parse readme template: %w", err)
	}
	// Create writer to template into
	// No need to use SecureJoin here, as the path is already sanitized.
	readmePath := path.Join(dirPath, "README.md")
	readmeFile, err := os.Create(readmePath)
	if err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create README file: %w", err)
	}
	err = readmeTemplate.Execute(readmeFile, metadata)
	closeErr := readmeFile.Close()
	if closeErr != nil {
		log.WithError(closeErr).Error("failed to close README file")
	}
	if err != nil {
		return fmt.Errorf("failed to execute readme template: %w", err)
	}
	return nil
}

// writeManifests writes the manifests to the manifest.yaml file, truncating the file if it exists and appending the
// manifests in the order they are provided.
func writeManifests(dirPath string, manifests []*apiclient.HydratedManifestDetails) error {
	// If the file exists, truncate it.
	// No need to use SecureJoin here, as the path is already sanitized.
	manifestPath := path.Join(dirPath, "manifest.yaml")

	file, err := os.OpenFile(manifestPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return fmt.Errorf("failed to open manifest file: %w", err)
	}
	defer func() {
		err := file.Close()
		if err != nil {
			log.WithError(err).Error("failed to close file")
		}
	}()

	enc := yaml.NewEncoder(file)
	defer func() {
		err := enc.Close()
		if err != nil {
			log.WithError(err).Error("failed to close yaml encoder")
		}
	}()
	enc.SetIndent(2)

	for _, m := range manifests {
		obj := &unstructured.Unstructured{}
		err = json.Unmarshal([]byte(m.ManifestJSON), obj)
		if err != nil {
			return fmt.Errorf("failed to unmarshal manifest: %w", err)
		}
		err = enc.Encode(&obj.Object)
		if err != nil {
			return fmt.Errorf("failed to encode manifest: %w", err)
		}
	}

	return nil
}
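A hedged usage sketch of WriteForPaths, writing hydrated manifests for one app into a scratch directory; the resulting layout is shown in the trailing comment (the repo URL, SHA, and path are hypothetical):

```go
package main

import (
	"log"
	"os"

	"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
	"github.com/argoproj/argo-cd/v2/commitserver/commit"
)

func main() {
	root, err := os.MkdirTemp("", "hydrate-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(root)

	paths := []*apiclient.PathDetails{
		{
			Path: "apps/guestbook",
			Manifests: []*apiclient.HydratedManifestDetails{
				{ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap"}`},
			},
			Commands: []string{"kustomize build ."},
		},
	}
	if err := commit.WriteForPaths(root, "https://github.com/example/repo", "abc123", paths); err != nil {
		log.Fatal(err)
	}
	// Expected layout:
	//   <root>/hydrator.metadata                  (repo URL + dry SHA)
	//   <root>/apps/guestbook/manifest.yaml       (the hydrated manifests)
	//   <root>/apps/guestbook/hydrator.metadata   (per-path metadata + commands)
	//   <root>/apps/guestbook/README.md           (reproduction instructions)
}
```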
commitserver/commit/hydratorhelper_test.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package commit

import (
	"encoding/json"
	"os"
	"path"
	"testing"

	securejoin "github.com/cyphar/filepath-securejoin"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
)

func TestWriteForPaths(t *testing.T) {
	dir := t.TempDir()

	repoUrl := "https://github.com/example/repo"
	drySha := "abc123"
	paths := []*apiclient.PathDetails{
		{
			Path: "path1",
			Manifests: []*apiclient.HydratedManifestDetails{
				{ManifestJSON: `{"kind":"Pod","apiVersion":"v1"}`},
			},
			Commands: []string{"command1", "command2"},
		},
		{
			Path: "path2",
			Manifests: []*apiclient.HydratedManifestDetails{
				{ManifestJSON: `{"kind":"Service","apiVersion":"v1"}`},
			},
			Commands: []string{"command3"},
		},
	}

	err := WriteForPaths(dir, repoUrl, drySha, paths)
	require.NoError(t, err)

	// Check if the top-level hydrator.metadata exists and contains the repo URL and dry SHA
	topMetadataPath := path.Join(dir, "hydrator.metadata")
	topMetadataBytes, err := os.ReadFile(topMetadataPath)
	require.NoError(t, err)

	var topMetadata hydratorMetadataFile
	err = json.Unmarshal(topMetadataBytes, &topMetadata)
	require.NoError(t, err)
	assert.Equal(t, repoUrl, topMetadata.RepoURL)
	assert.Equal(t, drySha, topMetadata.DrySHA)

	for _, p := range paths {
		fullHydratePath, err := securejoin.SecureJoin(dir, p.Path)
		require.NoError(t, err)

		// Check if each path directory exists
		assert.DirExists(t, fullHydratePath)

		// Check if each path contains a hydrator.metadata file and contains the repo URL
		metadataPath := path.Join(fullHydratePath, "hydrator.metadata")
		metadataBytes, err := os.ReadFile(metadataPath)
		require.NoError(t, err)

		var readMetadata hydratorMetadataFile
		err = json.Unmarshal(metadataBytes, &readMetadata)
		require.NoError(t, err)
		assert.Equal(t, repoUrl, readMetadata.RepoURL)

		// Check if each path contains a README.md file and contains the repo URL
		readmePath := path.Join(fullHydratePath, "README.md")
		readmeBytes, err := os.ReadFile(readmePath)
		require.NoError(t, err)
		assert.Contains(t, string(readmeBytes), repoUrl)

		// Check if each path contains a manifest.yaml file and contains a manifest kind
		manifestPath := path.Join(fullHydratePath, "manifest.yaml")
		manifestBytes, err := os.ReadFile(manifestPath)
		require.NoError(t, err)
		assert.Contains(t, string(manifestBytes), "kind")
	}
}

func TestWriteMetadata(t *testing.T) {
	dir := t.TempDir()

	metadata := hydratorMetadataFile{
		RepoURL: "https://github.com/example/repo",
		DrySHA:  "abc123",
	}

	err := writeMetadata(dir, metadata)
	require.NoError(t, err)

	metadataPath := path.Join(dir, "hydrator.metadata")
	metadataBytes, err := os.ReadFile(metadataPath)
	require.NoError(t, err)

	var readMetadata hydratorMetadataFile
	err = json.Unmarshal(metadataBytes, &readMetadata)
	require.NoError(t, err)
	assert.Equal(t, metadata, readMetadata)
}

func TestWriteReadme(t *testing.T) {
	dir := t.TempDir()

	metadata := hydratorMetadataFile{
		RepoURL: "https://github.com/example/repo",
		DrySHA:  "abc123",
	}

	err := writeReadme(dir, metadata)
	require.NoError(t, err)

	readmePath := path.Join(dir, "README.md")
	readmeBytes, err := os.ReadFile(readmePath)
	require.NoError(t, err)
	assert.Contains(t, string(readmeBytes), metadata.RepoURL)
}

func TestWriteManifests(t *testing.T) {
	dir := t.TempDir()

	manifests := []*apiclient.HydratedManifestDetails{
		{ManifestJSON: `{"kind":"Pod","apiVersion":"v1"}`},
	}

	err := writeManifests(dir, manifests)
	require.NoError(t, err)

	manifestPath := path.Join(dir, "manifest.yaml")
	manifestBytes, err := os.ReadFile(manifestPath)
	require.NoError(t, err)
	assert.Contains(t, string(manifestBytes), "kind")
}
commitserver/commit/mocks/RepoClientFactory.go (generated, new file, 59 lines)
@@ -0,0 +1,59 @@
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

import (
	git "github.com/argoproj/argo-cd/v2/util/git"
	mock "github.com/stretchr/testify/mock"

	v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// RepoClientFactory is an autogenerated mock type for the RepoClientFactory type
type RepoClientFactory struct {
	mock.Mock
}

// NewClient provides a mock function with given fields: repo, rootPath
func (_m *RepoClientFactory) NewClient(repo *v1alpha1.Repository, rootPath string) (git.Client, error) {
	ret := _m.Called(repo, rootPath)

	if len(ret) == 0 {
		panic("no return value specified for NewClient")
	}

	var r0 git.Client
	var r1 error
	if rf, ok := ret.Get(0).(func(*v1alpha1.Repository, string) (git.Client, error)); ok {
		return rf(repo, rootPath)
	}
	if rf, ok := ret.Get(0).(func(*v1alpha1.Repository, string) git.Client); ok {
		r0 = rf(repo, rootPath)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(git.Client)
		}
	}

	if rf, ok := ret.Get(1).(func(*v1alpha1.Repository, string) error); ok {
		r1 = rf(repo, rootPath)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewRepoClientFactory creates a new instance of RepoClientFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewRepoClientFactory(t interface {
	mock.TestingT
	Cleanup(func())
}) *RepoClientFactory {
	mock := &RepoClientFactory{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
commitserver/commit/repo_client_factory.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package commit

import (
	"github.com/argoproj/argo-cd/v2/commitserver/metrics"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/util/git"
)

// RepoClientFactory is a factory for creating git clients for a repository.
type RepoClientFactory interface {
	NewClient(repo *v1alpha1.Repository, rootPath string) (git.Client, error)
}

type repoClientFactory struct {
	gitCredsStore git.CredsStore
	metricsServer *metrics.Server
}

// NewRepoClientFactory returns a new instance of the repo client factory.
func NewRepoClientFactory(gitCredsStore git.CredsStore, metricsServer *metrics.Server) RepoClientFactory {
	return &repoClientFactory{
		gitCredsStore: gitCredsStore,
		metricsServer: metricsServer,
	}
}

// NewClient creates a new git client for the repository.
func (r *repoClientFactory) NewClient(repo *v1alpha1.Repository, rootPath string) (git.Client, error) {
	gitCreds := repo.GetGitCreds(r.gitCredsStore)
	opts := git.WithEventHandlers(metrics.NewGitClientEventHandlers(r.metricsServer))
	return git.NewClientExt(repo.Repo, rootPath, gitCreds, repo.IsInsecure(), repo.IsLFSEnabled(), repo.Proxy, repo.NoProxy, opts)
}
commitserver/metrics/githandlers.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package metrics

import (
	"time"

	"github.com/argoproj/argo-cd/v2/util/git"
)

// NewGitClientEventHandlers creates event handlers that update Git related metrics
func NewGitClientEventHandlers(metricsServer *Server) git.EventHandlers {
	return git.EventHandlers{
		OnFetch: func(repo string) func() {
			startTime := time.Now()
			metricsServer.IncGitRequest(repo, GitRequestTypeFetch)
			return func() {
				metricsServer.ObserveGitRequestDuration(repo, GitRequestTypeFetch, time.Since(startTime))
			}
		},
		OnLsRemote: func(repo string) func() {
			startTime := time.Now()
			metricsServer.IncGitRequest(repo, GitRequestTypeLsRemote)
			return func() {
				metricsServer.ObserveGitRequestDuration(repo, GitRequestTypeLsRemote, time.Since(startTime))
			}
		},
		OnPush: func(repo string) func() {
			startTime := time.Now()
			metricsServer.IncGitRequest(repo, GitRequestTypePush)
			return func() {
				metricsServer.ObserveGitRequestDuration(repo, GitRequestTypePush, time.Since(startTime))
			}
		},
	}
}
commitserver/metrics/metrics.go (new file, 157 lines)
@@ -0,0 +1,157 @@
package metrics
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
// Server is a prometheus server which collects application metrics.
|
||||
type Server struct {
|
||||
handler http.Handler
|
||||
commitPendingRequestsGauge *prometheus.GaugeVec
|
||||
gitRequestCounter *prometheus.CounterVec
|
||||
gitRequestHistogram *prometheus.HistogramVec
|
||||
commitRequestHistogram *prometheus.HistogramVec
|
||||
userInfoRequestHistogram *prometheus.HistogramVec
|
||||
commitRequestCounter *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// GitRequestType is the type of git request
|
||||
type GitRequestType string
|
||||
|
||||
const (
|
||||
// GitRequestTypeLsRemote is a request to list remote refs
|
||||
GitRequestTypeLsRemote = "ls-remote"
|
||||
// GitRequestTypeFetch is a request to fetch from remote
|
||||
GitRequestTypeFetch = "fetch"
|
||||
// GitRequestTypePush is a request to push to remote
|
||||
GitRequestTypePush = "push"
|
||||
)
|
||||
|
||||
// CommitResponseType is the type of response for a commit request
|
||||
type CommitResponseType string
|
||||
|
||||
const (
|
||||
// CommitResponseTypeSuccess is a successful commit request
|
||||
CommitResponseTypeSuccess CommitResponseType = "success"
|
||||
// CommitResponseTypeFailure is a failed commit request
|
||||
CommitResponseTypeFailure CommitResponseType = "failure"
|
||||
)
|
||||
|
||||
// NewMetricsServer returns a new prometheus server which collects application metrics.
|
||||
func NewMetricsServer() *Server {
|
||||
registry := prometheus.NewRegistry()
|
||||
registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
|
||||
registry.MustRegister(collectors.NewGoCollector())
|
||||
|
||||
commitPendingRequestsGauge := prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "argocd_commitserver_commit_pending_request_total",
|
||||
Help: "Number of pending commit requests",
|
||||
},
|
||||
[]string{"repo"},
|
||||
)
|
||||
registry.MustRegister(commitPendingRequestsGauge)
|
||||
|
||||
gitRequestCounter := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "argocd_commitserver_git_request_total",
|
||||
Help: "Number of git requests performed by repo server",
|
||||
},
|
||||
[]string{"repo", "request_type"},
|
||||
)
|
||||
registry.MustRegister(gitRequestCounter)
|
||||
|
||||
gitRequestHistogram := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "argocd_commitserver_git_request_duration_seconds",
|
||||
Help: "Git requests duration seconds.",
|
||||
Buckets: []float64{0.1, 0.25, .5, 1, 2, 4, 10, 20},
|
||||
},
|
||||
[]string{"repo", "request_type"},
|
||||
)
|
||||
registry.MustRegister(gitRequestHistogram)
|
||||
|
||||
commitRequestHistogram := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "argocd_commitserver_commit_request_duration_seconds",
|
||||
Help: "Commit request duration seconds.",
|
||||
Buckets: []float64{0.1, 0.25, .5, 1, 2, 4, 10, 20},
|
||||
},
|
||||
[]string{"repo", "response_type"},
|
||||
)
|
||||
registry.MustRegister(commitRequestHistogram)
|
||||
|
||||
userInfoRequestHistogram := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "argocd_commitserver_userinfo_request_duration_seconds",
|
||||
Help: "Userinfo request duration seconds.",
|
||||
Buckets: []float64{0.1, 0.25, .5, 1, 2, 4, 10, 20},
|
||||
},
|
||||
[]string{"repo", "credential_type"},
|
||||
)
|
||||
registry.MustRegister(userInfoRequestHistogram)
|
||||
|
||||
commitRequestCounter := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "argocd_commitserver_commit_request_total",
|
||||
Help: "Number of commit requests performed handled",
|
||||
},
[]string{"repo", "response_type"},
)
registry.MustRegister(commitRequestCounter)

return &Server{
handler: promhttp.HandlerFor(registry, promhttp.HandlerOpts{}),
commitPendingRequestsGauge: commitPendingRequestsGauge,
gitRequestCounter: gitRequestCounter,
gitRequestHistogram: gitRequestHistogram,
commitRequestHistogram: commitRequestHistogram,
userInfoRequestHistogram: userInfoRequestHistogram,
commitRequestCounter: commitRequestCounter,
}
}

// GetHandler returns the http.Handler for the prometheus server
func (m *Server) GetHandler() http.Handler {
return m.handler
}

// IncPendingCommitRequest increments the pending commit requests gauge
func (m *Server) IncPendingCommitRequest(repo string) {
m.commitPendingRequestsGauge.WithLabelValues(repo).Inc()
}

// DecPendingCommitRequest decrements the pending commit requests gauge
func (m *Server) DecPendingCommitRequest(repo string) {
m.commitPendingRequestsGauge.WithLabelValues(repo).Dec()
}

// IncGitRequest increments the git requests counter
func (m *Server) IncGitRequest(repo string, requestType GitRequestType) {
m.gitRequestCounter.WithLabelValues(repo, string(requestType)).Inc()
}

// ObserveGitRequestDuration observes the duration of a git request
func (m *Server) ObserveGitRequestDuration(repo string, requestType GitRequestType, duration time.Duration) {
m.gitRequestHistogram.WithLabelValues(repo, string(requestType)).Observe(duration.Seconds())
}

// ObserveCommitRequestDuration observes the duration of a commit request
func (m *Server) ObserveCommitRequestDuration(repo string, rt CommitResponseType, duration time.Duration) {
m.commitRequestHistogram.WithLabelValues(repo, string(rt)).Observe(duration.Seconds())
}

// ObserveUserInfoRequestDuration observes the duration of a userinfo request
func (m *Server) ObserveUserInfoRequestDuration(repo string, credentialType string, duration time.Duration) {
m.userInfoRequestHistogram.WithLabelValues(repo, credentialType).Observe(duration.Seconds())
}

// IncCommitRequest increments the commit request counter
func (m *Server) IncCommitRequest(repo string, rt CommitResponseType) {
m.commitRequestCounter.WithLabelValues(repo, string(rt)).Inc()
}
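
For orientation, a minimal usage sketch (not part of this diff) of how a commit-server caller might combine these helpers; the GitRequestTypeFetch constant and the fetch callback are assumptions for illustration:

// Hypothetical sketch: count a git request and record its duration.
func fetchWithMetrics(m *Server, repo string, fetch func() error) error {
    m.IncGitRequest(repo, GitRequestTypeFetch) // assumed GitRequestType constant
    start := time.Now()
    err := fetch()
    m.ObserveGitRequestDuration(repo, GitRequestTypeFetch, time.Since(start))
    return err
}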
38
commitserver/server.go
Normal file
@@ -0,0 +1,38 @@
package commitserver

import (
"google.golang.org/grpc"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"

"github.com/argoproj/argo-cd/v2/commitserver/apiclient"
"github.com/argoproj/argo-cd/v2/commitserver/commit"
"github.com/argoproj/argo-cd/v2/commitserver/metrics"
versionpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/version"
"github.com/argoproj/argo-cd/v2/server/version"
"github.com/argoproj/argo-cd/v2/util/git"
)

// ArgoCDCommitServer is the server that handles commit requests.
type ArgoCDCommitServer struct {
commitService *commit.Service
}

// NewServer returns a new instance of the commit server.
func NewServer(gitCredsStore git.CredsStore, metricsServer *metrics.Server) *ArgoCDCommitServer {
return &ArgoCDCommitServer{commitService: commit.NewService(gitCredsStore, metricsServer)}
}

// CreateGRPC creates a new gRPC server.
func (a *ArgoCDCommitServer) CreateGRPC() *grpc.Server {
server := grpc.NewServer()
versionpkg.RegisterVersionServiceServer(server, version.NewServer(nil, func() (bool, error) {
return true, nil
}))
apiclient.RegisterCommitServiceServer(server, a.commitService)

healthService := health.NewServer()
grpc_health_v1.RegisterHealthServer(server, healthService)

return server
}
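
A hedged wiring sketch (not from this diff) of how a command entrypoint could serve the server built by CreateGRPC; the listener address mirrors DefaultPortCommitServer below, and the net import is assumed:

// Hypothetical sketch: listen on the commit server port and serve gRPC.
func serveCommitServer(srv *ArgoCDCommitServer) error {
    lis, err := net.Listen("tcp", "0.0.0.0:8086") // DefaultPortCommitServer
    if err != nil {
        return err
    }
    return srv.CreateGRPC().Serve(lis)
}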
@@ -26,6 +26,8 @@ const (
const (
// DefaultRepoServerAddr is the gRPC address of the Argo CD repo server
DefaultRepoServerAddr = "argocd-repo-server:8081"
// DefaultCommitServerAddr is the gRPC address of the Argo CD commit server
DefaultCommitServerAddr = "argocd-commit-server:8086"
// DefaultDexServerAddr is the HTTP address of the Dex OIDC server, which we run a reverse proxy against
DefaultDexServerAddr = "argocd-dex-server:5556"
// DefaultRedisAddr is the default redis address
@@ -62,15 +64,19 @@ const (
DefaultPortArgoCDMetrics = 8082
DefaultPortArgoCDAPIServerMetrics = 8083
DefaultPortRepoServerMetrics = 8084
DefaultPortCommitServer = 8086
DefaultPortCommitServerMetrics = 8087
)

// DefaultAddressAPIServer for ArgoCD components
const (
DefaultAddressAdminDashboard = "localhost"
DefaultAddressAPIServer = "0.0.0.0"
DefaultAddressAPIServerMetrics = "0.0.0.0"
DefaultAddressRepoServer = "0.0.0.0"
DefaultAddressRepoServerMetrics = "0.0.0.0"
DefaultAddressAdminDashboard = "localhost"
DefaultAddressAPIServer = "0.0.0.0"
DefaultAddressAPIServerMetrics = "0.0.0.0"
DefaultAddressRepoServer = "0.0.0.0"
DefaultAddressRepoServerMetrics = "0.0.0.0"
DefaultAddressCommitServer = "0.0.0.0"
DefaultAddressCommitServerMetrics = "0.0.0.0"
)
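
Purely illustrative (the constants are from the block above; the composition itself is an assumption, not code from this diff): a bind or dial target can be composed as

addr := fmt.Sprintf("%s:%d", DefaultAddressCommitServerMetrics, DefaultPortCommitServerMetrics) // "0.0.0.0:8087"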

// Default paths on the pod's file system
@@ -175,6 +181,8 @@ const (
LabelValueSecretTypeRepository = "repository"
// LabelValueSecretTypeRepoCreds indicates a secret type of repository credentials
LabelValueSecretTypeRepoCreds = "repo-creds"
// LabelValueSecretTypeRepositoryWrite indicates a secret type of repository credentials for writing
LabelValueSecretTypeRepositoryWrite = "repository-write"
// LabelValueSecretTypeSCMCreds indicates a secret type of SCM credentials
LabelValueSecretTypeSCMCreds = "scm-creds"

@@ -92,7 +92,7 @@ func TestSetOptionalRedisPasswordFromKubeConfig(t *testing.T) {
t.Parallel()
var (
ctx = context.TODO()
kubeClient = kubefake.NewSimpleClientset()
kubeClient = kubefake.NewClientset()
redisOptions = &redis.Options{}
)
if tc.secret != nil {

@@ -41,9 +41,12 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/ptr"

commitclient "github.com/argoproj/argo-cd/v2/commitserver/apiclient"
"github.com/argoproj/argo-cd/v2/common"
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
"github.com/argoproj/argo-cd/v2/controller/hydrator"
"github.com/argoproj/argo-cd/v2/controller/metrics"
"github.com/argoproj/argo-cd/v2/controller/sharding"
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
@@ -121,6 +124,8 @@ type ApplicationController struct {
appComparisonTypeRefreshQueue workqueue.TypedRateLimitingInterface[string]
appOperationQueue workqueue.TypedRateLimitingInterface[string]
projectRefreshQueue workqueue.TypedRateLimitingInterface[string]
appHydrateQueue workqueue.TypedRateLimitingInterface[string]
hydrationQueue workqueue.TypedRateLimitingInterface[hydrator.HydrationQueueKey]
appInformer cache.SharedIndexInformer
appLister applisters.ApplicationLister
projInformer cache.SharedIndexInformer
@@ -131,6 +136,7 @@ type ApplicationController struct {
statusRefreshJitter time.Duration
selfHealTimeout time.Duration
selfHealBackOff *wait.Backoff
selfHealBackoffCooldown time.Duration
syncTimeout time.Duration
db db.ArgoDB
settingsMgr *settings_util.SettingsManager
@@ -146,6 +152,8 @@ type ApplicationController struct {
// dynamicClusterDistributionEnabled if disabled deploymentInformer is never initialized
dynamicClusterDistributionEnabled bool
deploymentInformer informerv1.DeploymentInformer

hydrator *hydrator.Hydrator
}

// NewApplicationController creates new instance of ApplicationController.
@@ -155,6 +163,7 @@ func NewApplicationController(
kubeClientset kubernetes.Interface,
applicationClientset appclientset.Interface,
repoClientset apiclient.Clientset,
commitClientset commitclient.Clientset,
argoCache *appstatecache.Cache,
kubectl kube.Kubectl,
appResyncPeriod time.Duration,
@@ -162,6 +171,7 @@ func NewApplicationController(
appResyncJitter time.Duration,
selfHealTimeout time.Duration,
selfHealBackoff *wait.Backoff,
selfHealBackoffCooldown time.Duration,
syncTimeout time.Duration,
repoErrorGracePeriod time.Duration,
metricsPort int,
@@ -177,6 +187,7 @@ func NewApplicationController(
dynamicClusterDistributionEnabled bool,
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
enableK8sEvent []string,
hydratorEnabled bool,
) (*ApplicationController, error) {
log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
db := db.NewDB(namespace, settingsMgr, kubeClientset)
@@ -190,10 +201,12 @@ func NewApplicationController(
kubeClientset: kubeClientset,
kubectl: kubectl,
applicationClientset: applicationClientset,
appRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_reconciliation_queue"}),
appOperationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_operation_processing_queue"}),
projectRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "project_reconciliation_queue"}),
appComparisonTypeRefreshQueue: workqueue.NewTypedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
appRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_reconciliation_queue"}),
appOperationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_operation_processing_queue"}),
projectRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "project_reconciliation_queue"}),
appComparisonTypeRefreshQueue: workqueue.NewTypedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig)),
appHydrateQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_hydration_queue"}),
hydrationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[hydrator.HydrationQueueKey](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[hydrator.HydrationQueueKey]{Name: "manifest_hydration_queue"}),
db: db,
statusRefreshTimeout: appResyncPeriod,
statusHardRefreshTimeout: appHardResyncPeriod,
@@ -204,6 +217,7 @@ func NewApplicationController(
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
selfHealBackOff: selfHealBackoff,
selfHealBackoffCooldown: selfHealBackoffCooldown,
syncTimeout: syncTimeout,
clusterSharding: clusterSharding,
projByNameCache: sync.Map{},
@@ -211,6 +225,9 @@ func NewApplicationController(
dynamicClusterDistributionEnabled: dynamicClusterDistributionEnabled,
ignoreNormalizerOpts: ignoreNormalizerOpts,
}
if hydratorEnabled {
ctrl.hydrator = hydrator.NewHydrator(&ctrl, appResyncPeriod, commitClientset)
}
if kubectlParallelismLimit > 0 {
ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit)
}
@@ -296,7 +313,7 @@ func NewApplicationController(
}
}
stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterSharding, argo.NewResourceTracking())
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.onKubectlRun, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
ctrl.appInformer = appInformer
ctrl.appLister = appLister
ctrl.projInformer = projInformer
@@ -845,6 +862,8 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
defer ctrl.appComparisonTypeRefreshQueue.ShutDown()
defer ctrl.appOperationQueue.ShutDown()
defer ctrl.projectRefreshQueue.ShutDown()
defer ctrl.appHydrateQueue.ShutDown()
defer ctrl.hydrationQueue.ShutDown()

ctrl.metricsServer.RegisterClustersInfoSource(ctx, ctrl.stateCache)
ctrl.RegisterClusterSecretUpdater(ctx)
@@ -903,6 +922,19 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
for ctrl.processProjectQueueItem() {
}
}, time.Second, ctx.Done())

if ctrl.hydrator != nil {
go wait.Until(func() {
for ctrl.processAppHydrateQueueItem() {
}
}, time.Second, ctx.Done())

go wait.Until(func() {
for ctrl.processHydrationQueueItem() {
}
}, time.Second, ctx.Done())
}

<-ctx.Done()
}

@@ -912,7 +944,7 @@ func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith
key := ctrl.toAppKey(appName)

if compareWith != nil && after != nil {
ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, compareWith), *after)
ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, *compareWith), *after)
} else {
if compareWith != nil {
ctrl.refreshRequestedAppsMutex.Lock()
@@ -1165,7 +1197,7 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
isValid, cluster := ctrl.isValidDestination(app)
if !isValid {
app.UnSetCascadedDeletion()
app.UnSetPostDeleteFinalizer()
app.UnSetPostDeleteFinalizerAll()
if err := ctrl.updateFinalizers(app); err != nil {
return err
}
@@ -1444,7 +1476,7 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
} else {
state.Phase = synccommon.OperationRunning
state.RetryCount++
state.Message = fmt.Sprintf("%s. Retrying attempt #%d at %s.", state.Message, state.RetryCount, retryAt.Format(time.Kitchen))
state.Message = fmt.Sprintf("%s due to application controller sync timeout. Retrying attempt #%d at %s.", state.Message, state.RetryCount, retryAt.Format(time.Kitchen))
}
} else if state.RetryCount > 0 {
state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)
@@ -1646,11 +1678,9 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo

project, hasErrors := ctrl.refreshAppConditions(app)
ts.AddCheckpoint("refresh_app_conditions_ms")
now := metav1.Now()
if hasErrors {
app.Status.Sync.Status = appv1.SyncStatusCodeUnknown
app.Status.Health.Status = health.HealthStatusUnknown
app.Status.Health.LastTransitionTime = &now
patchMs = ctrl.persistAppStatus(origApp, &app.Status)

if err := ctrl.cache.SetAppResourcesTree(app.InstanceName(ctrl.namespace), &appv1.ApplicationTree{}); err != nil {
@@ -1741,6 +1771,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
ts.AddCheckpoint("auto_sync_ms")

if app.Status.ReconciledAt == nil || comparisonLevel >= CompareWithLatest {
now := metav1.Now()
app.Status.ReconciledAt = &now
}
app.Status.Sync = *compareResult.syncStatus
@@ -1774,6 +1805,68 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
return
}

func (ctrl *ApplicationController) processAppHydrateQueueItem() (processNext bool) {
appKey, shutdown := ctrl.appHydrateQueue.Get()
if shutdown {
processNext = false
return
}
processNext = true
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
}
ctrl.appHydrateQueue.Done(appKey)
}()
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey)
if err != nil {
log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
return
}
if !exists {
// This happens after app was deleted, but the work queue still had an entry for it.
return
}
origApp, ok := obj.(*appv1.Application)
if !ok {
log.Warnf("Key '%s' in index is not an application", appKey)
return
}

ctrl.hydrator.ProcessAppHydrateQueueItem(origApp)

getAppLog(origApp).Debug("Successfully processed app hydrate queue item")
return
}

func (ctrl *ApplicationController) processHydrationQueueItem() (processNext bool) {
hydrationKey, shutdown := ctrl.hydrationQueue.Get()
if shutdown {
processNext = false
return
}
processNext = true
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
}
ctrl.hydrationQueue.Done(hydrationKey)
}()

logCtx := log.WithFields(log.Fields{
"sourceRepoURL": hydrationKey.SourceRepoURL,
"sourceTargetRevision": hydrationKey.SourceTargetRevision,
"destinationBranch": hydrationKey.DestinationBranch,
})

logCtx.Debug("Processing hydration queue item")

ctrl.hydrator.ProcessHydrationQueueItem(hydrationKey)

logCtx.Debug("Successfully processed hydration queue item")
return
}
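
A hypothetical illustration (values invented) of why the hydration queue is keyed by destination rather than by app: apps sharing a dry source and destination branch produce equal, comparable keys, so the typed workqueue coalesces them into one hydration pass.

// Illustrative only, not from this diff:
keyA := hydrator.HydrationQueueKey{SourceRepoURL: "https://git.example.com/deploy.git", SourceTargetRevision: "main", DestinationBranch: "env/prod"}
keyB := keyA
fmt.Println(keyA == keyB) // true: one queue item, one hydrated commit for both apps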

func resourceStatusKey(res appv1.ResourceStatus) string {
return strings.Join([]string{res.Group, res.Kind, res.Namespace, res.Name}, "/")
}
@@ -1782,7 +1875,8 @@ func currentSourceEqualsSyncedSource(app *appv1.Application) bool {
if app.Spec.HasMultipleSources() {
return app.Spec.Sources.Equals(app.Status.Sync.ComparedTo.Sources)
}
return app.Spec.Source.Equals(&app.Status.Sync.ComparedTo.Source)
source := app.Spec.GetSource()
return source.Equals(&app.Status.Sync.ComparedTo.Source)
}

// needRefreshAppStatus answers if application status needs to be refreshed.
@@ -1908,9 +2002,15 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
ctrl.logAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message, context.TODO())
}
if orig.Status.Health.Status != newStatus.Health.Status {
now := metav1.Now()
newStatus.Health.LastTransitionTime = &now
message := fmt.Sprintf("Updated health status: %s -> %s", orig.Status.Health.Status, newStatus.Health.Status)
ctrl.logAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message, context.TODO())
} else {
// make sure the last transition time is the same and populated if the health is the same
newStatus.Health.LastTransitionTime = orig.Status.Health.LastTransitionTime
}

var newAnnotations map[string]string
if orig.GetAnnotations() != nil {
newAnnotations = make(map[string]string)
@@ -1918,6 +2018,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
newAnnotations[k] = v
}
delete(newAnnotations, appv1.AnnotationKeyRefresh)
delete(newAnnotations, appv1.AnnotationKeyHydrate)
}
patch, modified, err := createMergePatch(
&appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: orig.GetAnnotations()}, Status: orig.Status},
@@ -2011,9 +2112,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
InitiatedBy: appv1.OperationInitiator{Automated: true},
Retry: appv1.RetryStrategy{Limit: 5},
}
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
}

if app.Spec.SyncPolicy.Retry != nil {
op.Retry = *app.Spec.SyncPolicy.Retry
}
@@ -2029,8 +2128,18 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
}
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
return nil, 0
} else if alreadyAttempted && selfHeal {
if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
} else if selfHeal {
shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app, alreadyAttempted)
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
}

if alreadyAttempted {
if !shouldSelfHeal {
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
return nil, 0
}
op.Sync.SelfHealAttemptsCount++
for _, resource := range resources {
if resource.Status != appv1.SyncStatusCodeSynced {
@@ -2041,10 +2150,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
})
}
}
} else {
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
return nil, 0
}
}
ts.AddCheckpoint("already_attempted_check_ms")
@@ -2128,29 +2233,41 @@ func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS
}
}

func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool, time.Duration) {
func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application, alreadyAttempted bool) (bool, time.Duration) {
if app.Status.OperationState == nil {
return true, time.Duration(0)
}

var timeSinceOperation *time.Duration
if app.Status.OperationState.FinishedAt != nil {
timeSinceOperation = ptr.To(time.Since(app.Status.OperationState.FinishedAt.Time))
}

// Reset counter if the prior sync was successful and the cooldown period is over OR if the revision has changed
if !alreadyAttempted || (timeSinceOperation != nil && *timeSinceOperation >= ctrl.selfHealBackoffCooldown && app.Status.Sync.Status == appv1.SyncStatusCodeSynced) {
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = 0
}

var retryAfter time.Duration
if ctrl.selfHealBackOff == nil {
if app.Status.OperationState.FinishedAt == nil {
if timeSinceOperation == nil {
retryAfter = ctrl.selfHealTimeout
} else {
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
retryAfter = ctrl.selfHealTimeout - *timeSinceOperation
}
} else {
backOff := *ctrl.selfHealBackOff
backOff.Steps = int(app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
var delay time.Duration
for backOff.Steps > 0 {
steps := backOff.Steps
for i := 0; i < steps; i++ {
delay = backOff.Step()
}
if app.Status.OperationState.FinishedAt == nil {

if timeSinceOperation == nil {
retryAfter = delay
} else {
retryAfter = delay - time.Since(app.Status.OperationState.FinishedAt.Time)
retryAfter = delay - *timeSinceOperation
}
}
return retryAfter <= 0, retryAfter
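
To make the backoff arithmetic concrete, here is a small standalone sketch (assuming the usual k8s.io/apimachinery/pkg/util/wait semantics, where Step returns the current duration and then multiplies by Factor up to Cap) that reproduces the schedule the test below asserts for Factor=3, Duration=2s, Cap=2m:

// Hypothetical helper, not from this diff: delay after N self-heal attempts
// is 2s, 6s, 18s, 54s, then capped at 120s.
func selfHealDelay(attempts int) time.Duration {
    backOff := wait.Backoff{Factor: 3, Duration: 2 * time.Second, Cap: 2 * time.Minute, Steps: attempts}
    var delay time.Duration
    for i := 0; i < attempts; i++ {
        delay = backOff.Step()
    }
    return delay
}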
@@ -2325,6 +2442,9 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
if !newOK || (delay != nil && *delay != time.Duration(0)) {
ctrl.appOperationQueue.AddRateLimited(key)
}
if ctrl.hydrator != nil {
ctrl.appHydrateQueue.AddRateLimited(newApp.QualifiedName())
}
ctrl.clusterSharding.UpdateApp(newApp)
},
DeleteFunc: func(obj interface{}) {

@@ -42,6 +42,7 @@ import (

dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks"

mockcommitclient "github.com/argoproj/argo-cd/v2/commitserver/apiclient/mocks"
mockstatecache "github.com/argoproj/argo-cd/v2/controller/cache/mocks"
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
@@ -126,6 +127,8 @@ func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration,

mockRepoClientset := mockrepoclient.Clientset{RepoServerServiceClient: &mockRepoClient}

mockCommitClientset := mockcommitclient.Clientset{}

secret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "argocd-secret",
@@ -148,7 +151,7 @@ func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration,
}
runtimeObjs := []runtime.Object{&clust, &secret, &cm}
runtimeObjs = append(runtimeObjs, data.additionalObjs...)
kubeClient := fake.NewSimpleClientset(runtimeObjs...)
kubeClient := fake.NewClientset(runtimeObjs...)
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
kubectl := &MockKubectl{Kubectl: &kubetest.MockKubectlCmd{}}
ctrl, err := NewApplicationController(
@@ -157,6 +160,7 @@ func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration,
kubeClient,
appclientset.NewSimpleClientset(data.apps...),
&mockRepoClientset,
&mockCommitClientset,
appstatecache.NewCache(
cacheutil.NewCache(cacheutil.NewInMemoryCache(1*time.Minute)),
1*time.Minute,
@@ -167,6 +171,7 @@ func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration,
time.Second,
time.Minute,
nil,
time.Minute,
0,
time.Second*10,
common.DefaultPortArgoCDMetrics,
@@ -182,6 +187,7 @@ func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration,
false,
normalizers.IgnoreNormalizerOpts{},
testEnableEventList,
false,
)
db := &dbmocks.ArgoDB{}
db.On("GetApplicationControllerReplicas").Return(1)
@@ -1404,6 +1410,25 @@ func TestNeedRefreshAppStatus(t *testing.T) {
assert.Equal(t, CompareWithRecent, compareWith)
})

t.Run("requesting refresh with delay gives correct comparison level", func(t *testing.T) {
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.False(t, needRefresh)

// use a one-off controller so other tests don't have a manual refresh request
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)

// refresh app with a non-nil delay
// use zero-second delay to test the add later logic without waiting in the test
delay := time.Duration(0)
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), &delay)

ctrl.processAppComparisonTypeQueueItem()
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
assert.True(t, needRefresh)
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
assert.Equal(t, CompareWithRecent, compareWith)
})

t.Run("refresh application which status is not reconciled using latest commit", func(t *testing.T) {
app := app.DeepCopy()
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
@@ -1813,7 +1838,7 @@ apps/Deployment:
hs = {}
hs.status = ""
hs.message = ""

if obj.metadata ~= nil then
if obj.metadata.labels ~= nil then
current_status = obj.metadata.labels["status"]
@@ -2037,7 +2062,7 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) {
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
assert.Equal(t, string(synccommon.OperationRunning), phase)
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Contains(t, message, "Retrying attempt #1")
assert.Contains(t, message, "due to application controller sync timeout. Retrying attempt #1")
retryCount, _, _ := unstructured.NestedFloat64(receivedPatch, "status", "operationState", "retryCount")
assert.InEpsilon(t, float64(1), retryCount, 0.0001)
}
@@ -2509,7 +2534,7 @@ func TestSelfHealExponentialBackoff(t *testing.T) {
ctrl.selfHealBackOff = &wait.Backoff{
Factor: 3,
Duration: 2 * time.Second,
Cap: 5 * time.Minute,
Cap: 2 * time.Minute,
}

app := &v1alpha1.Application{
@@ -2524,29 +2549,92 @@ func TestSelfHealExponentialBackoff(t *testing.T) {

testCases := []struct {
attempts int64
expectedAttempts int64
finishedAt *metav1.Time
expectedDuration time.Duration
shouldSelfHeal bool
alreadyAttempted bool
syncStatus v1alpha1.SyncStatusCode
}{{
attempts: 0,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 0,
shouldSelfHeal: true,
alreadyAttempted: true,
expectedAttempts: 0,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 1,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 2 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 1,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 2,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 6 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 2,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 3,
finishedAt: nil,
expectedDuration: 18 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 3,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 4,
finishedAt: nil,
expectedDuration: 54 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 4,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 5,
finishedAt: nil,
expectedDuration: 120 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 5,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 6,
finishedAt: nil,
expectedDuration: 120 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 6,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, {
attempts: 6,
finishedAt: nil,
expectedDuration: 0,
shouldSelfHeal: true,
alreadyAttempted: false,
expectedAttempts: 0,
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
}, { // backoff will not reset as finished time isn't >= cooldown
attempts: 6,
finishedAt: ptr.To(metav1.Now()),
expectedDuration: 120 * time.Second,
shouldSelfHeal: false,
alreadyAttempted: true,
expectedAttempts: 6,
syncStatus: v1alpha1.SyncStatusCodeSynced,
}, { // backoff will reset as finished time is >= cooldown
attempts: 40,
finishedAt: &metav1.Time{Time: time.Now().Add(-(1 * time.Minute))},
expectedDuration: -60 * time.Second,
shouldSelfHeal: true,
alreadyAttempted: true,
expectedAttempts: 0,
syncStatus: v1alpha1.SyncStatusCodeSynced,
}}

for i := range testCases {
@@ -2554,8 +2642,10 @@ func TestSelfHealExponentialBackoff(t *testing.T) {
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = tc.attempts
app.Status.OperationState.FinishedAt = tc.finishedAt
ok, duration := ctrl.shouldSelfHeal(app)
app.Status.Sync.Status = tc.syncStatus
ok, duration := ctrl.shouldSelfHeal(app, tc.alreadyAttempted)
require.Equal(t, ok, tc.shouldSelfHeal)
require.Equal(t, tc.expectedAttempts, app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
assertDurationAround(t, tc.expectedDuration, duration)
})
}

20
controller/cache/cache.go
vendored
@@ -69,6 +69,12 @@ const (
// EnvClusterCacheRetryUseBackoff is the env variable to control whether to use a backoff strategy with the retry during cluster cache sync
EnvClusterCacheRetryUseBackoff = "ARGOCD_CLUSTER_CACHE_RETRY_USE_BACKOFF"

// EnvClusterCacheBatchEventsProcessing is the env variable to control whether to enable batch events processing
EnvClusterCacheBatchEventsProcessing = "ARGOCD_CLUSTER_CACHE_BATCH_EVENTS_PROCESSING"

// EnvClusterCacheEventsProcessingInterval is the env variable to control the interval between processing events when BatchEventsProcessing is enabled
EnvClusterCacheEventsProcessingInterval = "ARGOCD_CLUSTER_CACHE_EVENTS_PROCESSING_INTERVAL"

// AnnotationIgnoreResourceUpdates when set to true on an untracked resource,
// argo will apply `ignoreResourceUpdates` configuration on it.
AnnotationIgnoreResourceUpdates = "argocd.argoproj.io/ignore-resource-updates"
@@ -103,6 +109,12 @@ var (

// clusterCacheRetryUseBackoff specifies whether to use a backoff strategy on cluster cache sync, if retry is enabled
clusterCacheRetryUseBackoff bool = false

// clusterCacheBatchEventsProcessing specifies whether to enable batch events processing
clusterCacheBatchEventsProcessing bool = false

// clusterCacheEventsProcessingInterval specifies the interval between processing events when BatchEventsProcessing is enabled
clusterCacheEventsProcessingInterval = 100 * time.Millisecond
)

func init() {
@@ -114,6 +126,8 @@ func init() {
clusterCacheListSemaphoreSize = env.ParseInt64FromEnv(EnvClusterCacheListSemaphore, clusterCacheListSemaphoreSize, 0, math.MaxInt64)
clusterCacheAttemptLimit = int32(env.ParseNumFromEnv(EnvClusterCacheAttemptLimit, int(clusterCacheAttemptLimit), 1, math.MaxInt32))
clusterCacheRetryUseBackoff = env.ParseBoolFromEnv(EnvClusterCacheRetryUseBackoff, false)
clusterCacheBatchEventsProcessing = env.ParseBoolFromEnv(EnvClusterCacheBatchEventsProcessing, false)
clusterCacheEventsProcessingInterval = env.ParseDurationFromEnv(EnvClusterCacheEventsProcessingInterval, clusterCacheEventsProcessingInterval, 0, math.MaxInt64)
}

type LiveStateCache interface {
@@ -554,6 +568,8 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
clustercache.SetLogr(logutils.NewLogrusLogger(log.WithField("server", cluster.Server))),
clustercache.SetRetryOptions(clusterCacheAttemptLimit, clusterCacheRetryUseBackoff, isRetryableError),
clustercache.SetRespectRBAC(respectRBAC),
clustercache.SetBatchEventsProcessing(clusterCacheBatchEventsProcessing),
clustercache.SetEventProcessingInterval(clusterCacheEventsProcessingInterval),
}

clusterCache = clustercache.NewClusterCache(clusterCacheConfig, clusterCacheOpts...)
@@ -608,6 +624,10 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
c.metricsServer.IncClusterEventsCount(cluster.Server, gvk.Group, gvk.Kind)
})

_ = clusterCache.OnProcessEventsHandler(func(duration time.Duration, processedEventsNumber int) {
c.metricsServer.ObserveResourceEventsProcessingDuration(cluster.Server, duration, processedEventsNumber)
})

c.clusters[server] = clusterCache

return clusterCache, nil

2
controller/cache/cache_test.go
vendored
@@ -140,7 +140,7 @@ func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
}
db := &dbmocks.ArgoDB{}
db.On("GetApplicationControllerReplicas").Return(1)
fakeClient := fake.NewSimpleClientset()
fakeClient := fake.NewClientset()
settingsMgr := argosettings.NewSettingsManager(context.TODO(), fakeClient, "argocd")
liveStateCacheLock := sync.RWMutex{}
gitopsEngineClusterCache := &mocks.ClusterCache{}

6
controller/cache/mocks/LiveStateCache.go
generated
vendored
@@ -1,4 +1,4 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.
// Code generated by mockery v2.53.4. DO NOT EDIT.

package mocks

@@ -55,7 +55,7 @@ func (_m *LiveStateCache) GetClusterCache(server string) (cache.ClusterCache, er
return r0, r1
}

// GetClustersInfo provides a mock function with given fields:
// GetClustersInfo provides a mock function with no fields
func (_m *LiveStateCache) GetClustersInfo() []cache.ClusterInfo {
ret := _m.Called()

@@ -172,7 +172,7 @@ func (_m *LiveStateCache) GetVersionsInfo(serverURL string) (string, []kube.APIR
return r0, r1, r2
}

// Init provides a mock function with given fields:
// Init provides a mock function with no fields
func (_m *LiveStateCache) Init() error {
ret := _m.Called()

@@ -67,7 +67,7 @@ func TestClusterSecretUpdater(t *testing.T) {
"server.secretkey": nil,
},
}
kubeclientset := fake.NewSimpleClientset(emptyArgoCDConfigMap, argoCDSecret)
kubeclientset := fake.NewClientset(emptyArgoCDConfigMap, argoCDSecret)
appclientset := appsfake.NewSimpleClientset()
appInformer := appinformers.NewApplicationInformer(appclientset, "", time.Minute, cache.Indexers{})
settingsManager := settings.NewSettingsManager(context.Background(), kubeclientset, fakeNamespace)

@@ -8,7 +8,6 @@ import (
"github.com/argoproj/gitops-engine/pkg/sync/ignore"
kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/argoproj/argo-cd/v2/common"
@@ -22,7 +21,9 @@ func setApplicationHealth(resources []managedResource, statuses []appv1.Resource
var savedErr error
var errCount uint

appHealth := appv1.HealthStatus{Status: health.HealthStatusHealthy}
appHealth := app.Status.Health.DeepCopy()
appHealth.Status = health.HealthStatusHealthy

for i, res := range resources {
if res.Target != nil && hookutil.Skip(res.Target) {
continue
@@ -82,18 +83,11 @@ func setApplicationHealth(resources []managedResource, statuses []appv1.Resource
}
if persistResourceHealth {
app.Status.ResourceHealthSource = appv1.ResourceHealthLocationInline
// if the status didn't change, don't update the timestamp
if app.Status.Health.Status == appHealth.Status && app.Status.Health.LastTransitionTime != nil {
appHealth.LastTransitionTime = app.Status.Health.LastTransitionTime
} else {
now := metav1.Now()
appHealth.LastTransitionTime = &now
}
} else {
app.Status.ResourceHealthSource = appv1.ResourceHealthLocationAppTree
}
if savedErr != nil && errCount > 1 {
savedErr = fmt.Errorf("see application-controller logs for %d other errors; most recent error was: %w", errCount-1, savedErr)
}
return &appHealth, savedErr
return appHealth, savedErr
}

@@ -73,7 +73,6 @@ func TestSetApplicationHealth(t *testing.T) {
assert.NotNil(t, healthStatus.LastTransitionTime)
assert.Nil(t, resourceStatuses[0].Health.LastTransitionTime)
assert.Nil(t, resourceStatuses[1].Health.LastTransitionTime)
previousLastTransitionTime := healthStatus.LastTransitionTime
app.Status.Health = *healthStatus

// now mark the job as a hook and retry. it should ignore the hook and consider the app healthy
@@ -81,9 +80,8 @@ func TestSetApplicationHealth(t *testing.T) {
healthStatus, err = setApplicationHealth(resources, resourceStatuses, nil, app, true)
require.NoError(t, err)
assert.Equal(t, health.HealthStatusHealthy, healthStatus.Status)
// change in health, timestamp should change
assert.NotEqual(t, *previousLastTransitionTime, *healthStatus.LastTransitionTime)
previousLastTransitionTime = healthStatus.LastTransitionTime
// timestamp should be the same in case health did not change
assert.Equal(t, app.Status.Health.LastTransitionTime, healthStatus.LastTransitionTime)
app.Status.Health = *healthStatus

// now we set the `argocd.argoproj.io/ignore-healthcheck: "true"` annotation on the job's target.
@@ -94,8 +92,7 @@ func TestSetApplicationHealth(t *testing.T) {
healthStatus, err = setApplicationHealth(resources, resourceStatuses, nil, app, true)
require.NoError(t, err)
assert.Equal(t, health.HealthStatusHealthy, healthStatus.Status)
// no change in health, timestamp shouldn't change
assert.Equal(t, *previousLastTransitionTime, *healthStatus.LastTransitionTime)
assert.Equal(t, app.Status.Health.LastTransitionTime, healthStatus.LastTransitionTime)
}

func TestSetApplicationHealth_ResourceHealthNotPersisted(t *testing.T) {
@@ -124,7 +121,7 @@ func TestSetApplicationHealth_MissingResource(t *testing.T) {
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
require.NoError(t, err)
assert.Equal(t, health.HealthStatusMissing, healthStatus.Status)
assert.False(t, healthStatus.LastTransitionTime.IsZero())
assert.Equal(t, app.Status.Health.LastTransitionTime, healthStatus.LastTransitionTime)
}

func TestSetApplicationHealth_HealthImproves(t *testing.T) {
@@ -156,7 +153,7 @@ func TestSetApplicationHealth_HealthImproves(t *testing.T) {
healthStatus, err := setApplicationHealth(resources, resourceStatuses, overrides, app, true)
require.NoError(t, err)
assert.Equal(t, tc.newStatus, healthStatus.Status)
assert.NotEqual(t, testTimestamp, *healthStatus.LastTransitionTime)
assert.Equal(t, app.Status.Health.LastTransitionTime, healthStatus.LastTransitionTime)
})
}
}
@@ -173,6 +170,7 @@ func TestSetApplicationHealth_MissingResourceNoBuiltHealthCheck(t *testing.T) {
healthStatus, err := setApplicationHealth(resources, resourceStatuses, lua.ResourceHealthOverrides{}, app, true)
require.NoError(t, err)
assert.Equal(t, health.HealthStatusHealthy, healthStatus.Status)
assert.Equal(t, app.Status.Health.LastTransitionTime, healthStatus.LastTransitionTime)
assert.Equal(t, health.HealthStatusMissing, resourceStatuses[0].Health.Status)
})

@@ -184,7 +182,7 @@ func TestSetApplicationHealth_MissingResourceNoBuiltHealthCheck(t *testing.T) {
}, app, true)
require.NoError(t, err)
assert.Equal(t, health.HealthStatusMissing, healthStatus.Status)
assert.False(t, healthStatus.LastTransitionTime.IsZero())
assert.Equal(t, app.Status.Health.LastTransitionTime, healthStatus.LastTransitionTime)
})
}

@@ -51,7 +51,7 @@ func (ctrl *ApplicationController) executePostDeleteHooks(app *v1alpha1.Applicat
revisions = append(revisions, src.TargetRevision)
}

targets, _, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false)
targets, _, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false, true)
if err != nil {
return false, err
}

355
controller/hydrator/hydrator.go
Normal file
@@ -0,0 +1,355 @@
package hydrator

import (
"context"
"encoding/json"
"fmt"
"time"

log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

commitclient "github.com/argoproj/argo-cd/v2/commitserver/apiclient"
"github.com/argoproj/argo-cd/v2/controller/utils"
appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
argoio "github.com/argoproj/argo-cd/v2/util/io"
)

// Dependencies is the interface for the dependencies of the Hydrator. It serves two purposes: 1) it prevents the
// hydrator from having direct access to the app controller, and 2) it allows for easy mocking of dependencies in tests.
// If you add something here, be sure that it is something the app controller needs to provide to the hydrator.
type Dependencies interface {
// TODO: determine if we actually need to get the app, or if all the stuff we need the app for is done already on
// the app controller side.
GetProcessableAppProj(app *appv1.Application) (*appv1.AppProject, error)
GetProcessableApps() (*appv1.ApplicationList, error)
GetRepoObjs(app *appv1.Application, source appv1.ApplicationSource, revision string, project *appv1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error)
GetWriteCredentials(ctx context.Context, repoURL string, project string) (*appv1.Repository, error)
RequestAppRefresh(appName string, appNamespace string) error
// TODO: only allow access to the hydrator status
PersistAppHydratorStatus(orig *appv1.Application, newStatus *appv1.SourceHydratorStatus)
AddHydrationQueueItem(key HydrationQueueKey)
}
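
A minimal test double, sketched here only to illustrate the mocking point in the comment above (this is not the repo's generated mocks package): embedding the interface lets a test override just the calls it expects, while unoverridden calls panic on the nil embedded interface.

// Hypothetical test double, not from this diff:
type fakeDeps struct {
    Dependencies // nil; calls to unoverridden methods panic loudly
    apps *appv1.ApplicationList
}

func (f *fakeDeps) GetProcessableApps() (*appv1.ApplicationList, error) {
    return f.apps, nil
}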

type Hydrator struct {
dependencies Dependencies
statusRefreshTimeout time.Duration
commitClientset commitclient.Clientset
}

func NewHydrator(dependencies Dependencies, statusRefreshTimeout time.Duration, commitClientset commitclient.Clientset) *Hydrator {
return &Hydrator{
dependencies: dependencies,
statusRefreshTimeout: statusRefreshTimeout,
commitClientset: commitClientset,
}
}

func (h *Hydrator) ProcessAppHydrateQueueItem(origApp *appv1.Application) {
origApp = origApp.DeepCopy()
app := origApp.DeepCopy()

if app.Spec.SourceHydrator == nil {
return
}

logCtx := utils.GetAppLog(app)

logCtx.Debug("Processing app hydrate queue item")

// TODO: don't reuse statusRefreshTimeout. Create a new timeout for hydration.
needsHydration, reason := appNeedsHydration(origApp, h.statusRefreshTimeout)
if !needsHydration {
return
}

logCtx.WithField("reason", reason).Info("Hydrating app")

app.Status.SourceHydrator.CurrentOperation = &appv1.HydrateOperation{
StartedAt: metav1.Now(),
FinishedAt: nil,
Phase: appv1.HydrateOperationPhaseHydrating,
SourceHydrator: *app.Spec.SourceHydrator,
}
h.dependencies.PersistAppHydratorStatus(origApp, &app.Status.SourceHydrator)
origApp.Status.SourceHydrator = app.Status.SourceHydrator
h.dependencies.AddHydrationQueueItem(getHydrationQueueKey(app))

logCtx.Debug("Successfully processed app hydrate queue item")
}

func getHydrationQueueKey(app *appv1.Application) HydrationQueueKey {
destinationBranch := app.Spec.SourceHydrator.SyncSource.TargetBranch
if app.Spec.SourceHydrator.HydrateTo != nil {
destinationBranch = app.Spec.SourceHydrator.HydrateTo.TargetBranch
}
key := HydrationQueueKey{
SourceRepoURL: app.Spec.SourceHydrator.DrySource.RepoURL,
SourceTargetRevision: app.Spec.SourceHydrator.DrySource.TargetRevision,
DestinationBranch: destinationBranch,
}
return key
}

type HydrationQueueKey struct {
SourceRepoURL string
SourceTargetRevision string
DestinationBranch string
}

// uniqueHydrationDestination is used to detect duplicate hydrate destinations.
type uniqueHydrationDestination struct {
sourceRepoURL string
sourceTargetRevision string
destinationBranch string
destinationPath string
}

func (h *Hydrator) ProcessHydrationQueueItem(hydrationKey HydrationQueueKey) (processNext bool) {
logCtx := log.WithFields(log.Fields{
"sourceRepoURL": hydrationKey.SourceRepoURL,
"sourceTargetRevision": hydrationKey.SourceTargetRevision,
"destinationBranch": hydrationKey.DestinationBranch,
})

relevantApps, drySHA, hydratedSHA, err := h.hydrateAppsLatestCommit(logCtx, hydrationKey)
if drySHA != "" {
logCtx = logCtx.WithField("drySHA", drySHA)
}
if err != nil {
logCtx.WithField("appCount", len(relevantApps)).WithError(err).Error("Failed to hydrate apps")
for _, app := range relevantApps {
origApp := app.DeepCopy()
app.Status.SourceHydrator.CurrentOperation.Phase = appv1.HydrateOperationPhaseFailed
failedAt := metav1.Now()
app.Status.SourceHydrator.CurrentOperation.FinishedAt = &failedAt
app.Status.SourceHydrator.CurrentOperation.Message = fmt.Sprintf("Failed to hydrate revision %q: %v", drySHA, err.Error())
// We may or may not have gotten far enough in the hydration process to get a non-empty SHA, but set it just
// in case we did.
app.Status.SourceHydrator.CurrentOperation.DrySHA = drySHA
h.dependencies.PersistAppHydratorStatus(origApp, &app.Status.SourceHydrator)
logCtx = logCtx.WithField("app", app.QualifiedName())
logCtx.Errorf("Failed to hydrate app: %v", err)
}
return
}
logCtx.WithField("appCount", len(relevantApps)).Debug("Successfully hydrated apps")
finishedAt := metav1.Now()
for _, app := range relevantApps {
origApp := app.DeepCopy()
operation := &appv1.HydrateOperation{
StartedAt: app.Status.SourceHydrator.CurrentOperation.StartedAt,
FinishedAt: &finishedAt,
Phase: appv1.HydrateOperationPhaseHydrated,
Message: "",
DrySHA: drySHA,
HydratedSHA: hydratedSHA,
SourceHydrator: app.Status.SourceHydrator.CurrentOperation.SourceHydrator,
}
app.Status.SourceHydrator.CurrentOperation = operation
app.Status.SourceHydrator.LastSuccessfulOperation = &appv1.SuccessfulHydrateOperation{
DrySHA: drySHA,
HydratedSHA: hydratedSHA,
SourceHydrator: app.Status.SourceHydrator.CurrentOperation.SourceHydrator,
}
h.dependencies.PersistAppHydratorStatus(origApp, &app.Status.SourceHydrator)
// Request a refresh since we pushed a new commit.
err := h.dependencies.RequestAppRefresh(app.Name, app.Namespace)
if err != nil {
logCtx.WithField("app", app.QualifiedName()).WithError(err).Error("Failed to request app refresh after hydration")
}
}
return
}

func (h *Hydrator) hydrateAppsLatestCommit(logCtx *log.Entry, hydrationKey HydrationQueueKey) ([]*appv1.Application, string, string, error) {
relevantApps, err := h.getRelevantAppsForHydration(logCtx, hydrationKey)
if err != nil {
return nil, "", "", fmt.Errorf("failed to get relevant apps for hydration: %w", err)
}

dryRevision, hydratedRevision, err := h.hydrate(logCtx, relevantApps)
if err != nil {
return relevantApps, dryRevision, "", fmt.Errorf("failed to hydrate apps: %w", err)
}

return relevantApps, dryRevision, hydratedRevision, nil
}

func (h *Hydrator) getRelevantAppsForHydration(logCtx *log.Entry, hydrationKey HydrationQueueKey) ([]*appv1.Application, error) {
// Get all apps
apps, err := h.dependencies.GetProcessableApps()
if err != nil {
return nil, fmt.Errorf("failed to list apps: %w", err)
}

var relevantApps []*appv1.Application
uniqueDestinations := make(map[uniqueHydrationDestination]bool, len(apps.Items))
for _, app := range apps.Items {
if app.Spec.SourceHydrator == nil {
continue
}

if app.Spec.SourceHydrator.DrySource.RepoURL != hydrationKey.SourceRepoURL ||
app.Spec.SourceHydrator.DrySource.TargetRevision != hydrationKey.SourceTargetRevision {
continue
}
destinationBranch := app.Spec.SourceHydrator.SyncSource.TargetBranch
if app.Spec.SourceHydrator.HydrateTo != nil {
destinationBranch = app.Spec.SourceHydrator.HydrateTo.TargetBranch
}
if destinationBranch != hydrationKey.DestinationBranch {
continue
}

var proj *appv1.AppProject
proj, err = h.dependencies.GetProcessableAppProj(&app)
if err != nil {
return nil, fmt.Errorf("failed to get project %q for app %q: %w", app.Spec.Project, app.QualifiedName(), err)
}
permitted := proj.IsSourcePermitted(app.Spec.GetSource())
if !permitted {
// Log and skip. We don't want to fail the entire operation because of one app.
logCtx.Warnf("App %q is not permitted to use source %q", app.QualifiedName(), app.Spec.Source.String())
continue
}

uniqueDestinationKey := uniqueHydrationDestination{
sourceRepoURL: app.Spec.SourceHydrator.DrySource.RepoURL,
sourceTargetRevision: app.Spec.SourceHydrator.DrySource.TargetRevision,
destinationBranch: destinationBranch,
destinationPath: app.Spec.SourceHydrator.SyncSource.Path,
}
// TODO: test the dupe detection
if _, ok := uniqueDestinations[uniqueDestinationKey]; ok {
return nil, fmt.Errorf("multiple app hydrators use the same destination: %v", uniqueDestinationKey)
}
uniqueDestinations[uniqueDestinationKey] = true

relevantApps = append(relevantApps, &app)
}
return relevantApps, nil
}

func (h *Hydrator) hydrate(logCtx *log.Entry, apps []*appv1.Application) (string, string, error) {
if len(apps) == 0 {
return "", "", nil
}
repoURL := apps[0].Spec.SourceHydrator.DrySource.RepoURL
syncBranch := apps[0].Spec.SourceHydrator.SyncSource.TargetBranch
targetBranch := apps[0].Spec.GetHydrateToSource().TargetRevision
var paths []*commitclient.PathDetails
projects := make(map[string]bool, len(apps))
var targetRevision string
// TODO: parallelize this loop
for _, app := range apps {
project, err := h.dependencies.GetProcessableAppProj(app)
if err != nil {
return "", "", fmt.Errorf("failed to get project: %w", err)
}
projects[project.Name] = true
drySource := appv1.ApplicationSource{
RepoURL: app.Spec.SourceHydrator.DrySource.RepoURL,
Path: app.Spec.SourceHydrator.DrySource.Path,
TargetRevision: app.Spec.SourceHydrator.DrySource.TargetRevision,
}
if targetRevision == "" {
targetRevision = app.Spec.SourceHydrator.DrySource.TargetRevision
}

// TODO: enable signature verification
objs, resp, err := h.dependencies.GetRepoObjs(app, drySource, targetRevision, project)
if err != nil {
return "", "", fmt.Errorf("failed to get repo objects: %w", err)
}

// This should be the DRY SHA. We set it here so that after processing the first app, all apps are hydrated
// using the same SHA.
targetRevision = resp.Revision

// Set up a ManifestsRequest
manifestDetails := make([]*commitclient.HydratedManifestDetails, len(objs))
for i, obj := range objs {
objJson, err := json.Marshal(obj)
if err != nil {
return "", "", fmt.Errorf("failed to marshal object: %w", err)
}
manifestDetails[i] = &commitclient.HydratedManifestDetails{ManifestJSON: string(objJson)}
}

paths = append(paths, &commitclient.PathDetails{
Path: app.Spec.SourceHydrator.SyncSource.Path,
Manifests: manifestDetails,
Commands: resp.Commands,
})
}

// If all the apps are under the same project, use that project. Otherwise, use an empty string to indicate that we
// need global creds.
project := ""
if len(projects) == 1 {
for p := range projects {
project = p
}
}

repo, err := h.dependencies.GetWriteCredentials(context.Background(), repoURL, project)
if err != nil {
return "", "", fmt.Errorf("failed to get hydrator credentials: %w", err)
}
if repo == nil {
// Try without credentials.
repo = &appv1.Repository{
Repo: repoURL,
}
logCtx.Warn("no credentials found for repo, continuing without credentials")
}

manifestsRequest := commitclient.CommitHydratedManifestsRequest{
Repo: repo,
SyncBranch: syncBranch,
TargetBranch: targetBranch,
DrySha: targetRevision,
CommitMessage: fmt.Sprintf("[Argo CD Bot] hydrate %s", targetRevision),
Paths: paths,
}

closer, commitService, err := h.commitClientset.NewCommitServerClient()
if err != nil {
return targetRevision, "", fmt.Errorf("failed to create commit service: %w", err)
}
defer argoio.Close(closer)
resp, err := commitService.CommitHydratedManifests(context.Background(), &manifestsRequest)
if err != nil {
return targetRevision, "", fmt.Errorf("failed to commit hydrated manifests: %w", err)
}
return targetRevision, resp.HydratedSha, nil
}

// appNeedsHydration answers if application needs manifests hydrated.
func appNeedsHydration(app *appv1.Application, statusHydrateTimeout time.Duration) (needsHydration bool, reason string) {
if app.Spec.SourceHydrator == nil {
return false, "source hydrator not configured"
}

var hydratedAt *metav1.Time
|
||||
if app.Status.SourceHydrator.CurrentOperation != nil {
|
||||
hydratedAt = &app.Status.SourceHydrator.CurrentOperation.StartedAt
|
||||
}
|
||||
|
||||
if app.IsHydrateRequested() {
|
||||
return true, "hydrate requested"
|
||||
} else if app.Status.SourceHydrator.CurrentOperation == nil {
|
||||
return true, "no previous hydrate operation"
|
||||
} else if !app.Spec.SourceHydrator.DeepEquals(app.Status.SourceHydrator.CurrentOperation.SourceHydrator) {
|
||||
return true, "spec.sourceHydrator differs"
|
||||
} else if app.Status.SourceHydrator.CurrentOperation.Phase == appv1.HydrateOperationPhaseFailed && metav1.Now().Sub(app.Status.SourceHydrator.CurrentOperation.FinishedAt.Time) > 2*time.Minute {
|
||||
return true, "previous hydrate operation failed more than 2 minutes ago"
|
||||
} else if hydratedAt == nil || hydratedAt.Add(statusHydrateTimeout).Before(time.Now().UTC()) {
|
||||
return true, "hydration expired"
|
||||
}
|
||||
|
||||
return false, ""
|
||||
}
|
||||
controller/hydrator/hydrator_test.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package hydrator

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func Test_appNeedsHydration(t *testing.T) {
	t.Parallel()

	now := metav1.NewTime(time.Now())
	oneHourAgo := metav1.NewTime(now.Add(-1 * time.Hour))

	testCases := []struct {
		name                   string
		app                    *v1alpha1.Application
		timeout                time.Duration
		expectedNeedsHydration bool
		expectedMessage        string
	}{
		{
			name:                   "source hydrator not configured",
			app:                    &v1alpha1.Application{},
			expectedNeedsHydration: false,
			expectedMessage:        "source hydrator not configured",
		},
		{
			name: "hydrate requested",
			app: &v1alpha1.Application{
				ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{v1alpha1.AnnotationKeyHydrate: "normal"}},
				Spec:       v1alpha1.ApplicationSpec{SourceHydrator: &v1alpha1.SourceHydrator{}},
			},
			timeout:                1 * time.Hour,
			expectedNeedsHydration: true,
			expectedMessage:        "hydrate requested",
		},
		{
			name: "no previous hydrate operation",
			app: &v1alpha1.Application{
				Spec: v1alpha1.ApplicationSpec{SourceHydrator: &v1alpha1.SourceHydrator{}},
			},
			timeout:                1 * time.Hour,
			expectedNeedsHydration: true,
			expectedMessage:        "no previous hydrate operation",
		},
		{
			name: "spec.sourceHydrator differs",
			app: &v1alpha1.Application{
				Spec: v1alpha1.ApplicationSpec{SourceHydrator: &v1alpha1.SourceHydrator{}},
				Status: v1alpha1.ApplicationStatus{SourceHydrator: v1alpha1.SourceHydratorStatus{CurrentOperation: &v1alpha1.HydrateOperation{
					SourceHydrator: v1alpha1.SourceHydrator{DrySource: v1alpha1.DrySource{RepoURL: "something new"}},
				}}},
			},
			timeout:                1 * time.Hour,
			expectedNeedsHydration: true,
			expectedMessage:        "spec.sourceHydrator differs",
		},
		{
			name: "hydration failed more than two minutes ago",
			app: &v1alpha1.Application{
				Spec:   v1alpha1.ApplicationSpec{SourceHydrator: &v1alpha1.SourceHydrator{}},
				Status: v1alpha1.ApplicationStatus{SourceHydrator: v1alpha1.SourceHydratorStatus{CurrentOperation: &v1alpha1.HydrateOperation{DrySHA: "abc123", FinishedAt: &oneHourAgo, Phase: v1alpha1.HydrateOperationPhaseFailed}}},
			},
			timeout:                1 * time.Hour,
			expectedNeedsHydration: true,
			expectedMessage:        "previous hydrate operation failed more than 2 minutes ago",
		},
		{
			name: "timeout reached",
			app: &v1alpha1.Application{
				Spec:   v1alpha1.ApplicationSpec{SourceHydrator: &v1alpha1.SourceHydrator{}},
				Status: v1alpha1.ApplicationStatus{SourceHydrator: v1alpha1.SourceHydratorStatus{CurrentOperation: &v1alpha1.HydrateOperation{StartedAt: oneHourAgo}}},
			},
			timeout:                1 * time.Minute,
			expectedNeedsHydration: true,
			expectedMessage:        "hydration expired",
		},
		{
			name: "hydrate not needed",
			app: &v1alpha1.Application{
				Spec:   v1alpha1.ApplicationSpec{SourceHydrator: &v1alpha1.SourceHydrator{}},
				Status: v1alpha1.ApplicationStatus{SourceHydrator: v1alpha1.SourceHydratorStatus{CurrentOperation: &v1alpha1.HydrateOperation{DrySHA: "abc123", StartedAt: now, FinishedAt: &now, Phase: v1alpha1.HydrateOperationPhaseFailed}}},
			},
			timeout:                1 * time.Hour,
			expectedNeedsHydration: false,
			expectedMessage:        "",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			needsHydration, message := appNeedsHydration(tc.app, tc.timeout)
			assert.Equal(t, tc.expectedNeedsHydration, needsHydration)
			assert.Equal(t, tc.expectedMessage, message)
		})
	}
}
controller/hydrator_dependencies.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package controller

import (
	"context"
	"fmt"

	"github.com/argoproj/argo-cd/v2/controller/hydrator"
	appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
	argoutil "github.com/argoproj/argo-cd/v2/util/argo"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

/**
This file implements the hydrator.Dependencies interface for the ApplicationController.

Hydration logic does not belong in this file. The methods here should be "bookkeeping" methods that keep hydration work
in the hydrator and app controller work in the app controller. The only purpose of this file is to provide the hydrator
safe, minimal access to certain app controller functionality to avoid duplicate code.
*/

func (ctrl *ApplicationController) GetProcessableAppProj(app *appv1.Application) (*appv1.AppProject, error) {
	return ctrl.getAppProj(app)
}

// GetProcessableApps returns a list of applications that are processable by the controller.
func (ctrl *ApplicationController) GetProcessableApps() (*appv1.ApplicationList, error) {
	// getAppList already filters out applications that are not processable by the controller.
	return ctrl.getAppList(metav1.ListOptions{})
}

func (ctrl *ApplicationController) GetRepoObjs(origApp *appv1.Application, drySource appv1.ApplicationSource, revision string, project *appv1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error) {
	drySources := []appv1.ApplicationSource{drySource}
	dryRevisions := []string{revision}

	appLabelKey, err := ctrl.settingsMgr.GetAppInstanceLabelKey()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get app instance label key: %w", err)
	}

	app := origApp.DeepCopy()
	// Remove the manifest generate path annotation, because the feature will misbehave for apps using source hydrator.
	// Setting this annotation causes GetRepoObjs to compare the dry source commit to the most recent synced commit. The
	// problem is that the most recent synced commit is likely on the hydrated branch, not the dry branch. The
	// comparison will throw an error and break hydration.
	//
	// The long-term solution will probably be to persist the synced _dry_ revision and use that for the comparison.
	delete(app.Annotations, appv1.AnnotationKeyManifestGeneratePaths)

	// FIXME: use cache and revision cache
	objs, resp, _, err := ctrl.appStateManager.GetRepoObjs(app, drySources, appLabelKey, dryRevisions, true, true, false, project, false, false)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get repo objects: %w", err)
	}

	if len(resp) != 1 {
		return nil, nil, fmt.Errorf("expected one manifest response, got %d", len(resp))
	}

	return objs, resp[0], nil
}

func (ctrl *ApplicationController) GetWriteCredentials(ctx context.Context, repoURL string, project string) (*appv1.Repository, error) {
	return ctrl.db.GetWriteRepository(ctx, repoURL, project)
}

func (ctrl *ApplicationController) RequestAppRefresh(appName string, appNamespace string) error {
	// We request a refresh by setting the annotation instead of by adding it to the refresh queue, because there is no
	// guarantee that the hydrator is running on the same controller shard as is processing the application.

	// This function is called for each app after a hydrate operation is completed so that the app controller can pick
	// up the newly-hydrated changes. So we set hydrate=false to avoid a hydrate loop.
	_, err := argoutil.RefreshApp(ctrl.applicationClientset.ArgoprojV1alpha1().Applications(appNamespace), appName, appv1.RefreshTypeNormal, false)
	if err != nil {
		return fmt.Errorf("failed to request app refresh: %w", err)
	}
	return nil
}

func (ctrl *ApplicationController) PersistAppHydratorStatus(orig *appv1.Application, newStatus *appv1.SourceHydratorStatus) {
	status := orig.Status.DeepCopy()
	status.SourceHydrator = *newStatus
	ctrl.persistAppStatus(orig, status)
}

func (ctrl *ApplicationController) AddHydrationQueueItem(key hydrator.HydrationQueueKey) {
	ctrl.hydrationQueue.AddRateLimited(key)
}
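
Taken together, the methods above imply the shape of the hydrator.Dependencies interface they implement. The following sketch is inferred from this file alone and is not the authoritative definition, which lives in the hydrator package:

// Dependencies (inferred): the minimal app-controller surface the hydrator needs.
type Dependencies interface {
	GetProcessableAppProj(app *appv1.Application) (*appv1.AppProject, error)
	GetProcessableApps() (*appv1.ApplicationList, error)
	GetRepoObjs(app *appv1.Application, source appv1.ApplicationSource, revision string, project *appv1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error)
	GetWriteCredentials(ctx context.Context, repoURL string, project string) (*appv1.Repository, error)
	RequestAppRefresh(appName string, appNamespace string) error
	PersistAppHydratorStatus(orig *appv1.Application, newStatus *appv1.SourceHydratorStatus)
	AddHydrationQueueItem(key HydrationQueueKey)
}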
@@ -30,18 +30,20 @@ import (

 type MetricsServer struct {
 	*http.Server
-	syncCounter             *prometheus.CounterVec
-	kubectlExecCounter      *prometheus.CounterVec
-	kubectlExecPendingGauge *prometheus.GaugeVec
-	orphanedResourcesGauge  *prometheus.GaugeVec
-	k8sRequestCounter       *prometheus.CounterVec
-	clusterEventsCounter    *prometheus.CounterVec
-	redisRequestCounter     *prometheus.CounterVec
-	reconcileHistogram      *prometheus.HistogramVec
-	redisRequestHistogram   *prometheus.HistogramVec
-	registry                *prometheus.Registry
-	hostname                string
-	cron                    *cron.Cron
+	syncCounter                       *prometheus.CounterVec
+	kubectlExecCounter                *prometheus.CounterVec
+	kubectlExecPendingGauge           *prometheus.GaugeVec
+	orphanedResourcesGauge            *prometheus.GaugeVec
+	k8sRequestCounter                 *prometheus.CounterVec
+	clusterEventsCounter              *prometheus.CounterVec
+	redisRequestCounter               *prometheus.CounterVec
+	reconcileHistogram                *prometheus.HistogramVec
+	redisRequestHistogram             *prometheus.HistogramVec
+	resourceEventsProcessingHistogram *prometheus.HistogramVec
+	resourceEventsNumberGauge         *prometheus.GaugeVec
+	registry                          *prometheus.Registry
+	hostname                          string
+	cron                              *cron.Cron
 }

 const (
@@ -153,6 +155,20 @@ var (
 		},
 		descAppDefaultLabels,
 	)
+
+	resourceEventsProcessingHistogram = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Name:    "argocd_resource_events_processing",
+			Help:    "Time to process resource events in seconds.",
+			Buckets: []float64{0.25, .5, 1, 2, 4, 8, 16},
+		},
+		[]string{"server"},
+	)
+
+	resourceEventsNumberGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Name: "argocd_resource_events_processed_in_batch",
+		Help: "Number of resource events processed in batch",
+	}, []string{"server"})
 )

 // NewMetricsServer returns a new prometheus server which collects application metrics
@@ -202,6 +218,8 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil
 	registry.MustRegister(clusterEventsCounter)
 	registry.MustRegister(redisRequestCounter)
 	registry.MustRegister(redisRequestHistogram)
+	registry.MustRegister(resourceEventsProcessingHistogram)
+	registry.MustRegister(resourceEventsNumberGauge)

 	return &MetricsServer{
 		registry: registry,
@@ -209,16 +227,18 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil
 			Addr:    addr,
 			Handler: mux,
 		},
-		syncCounter:             syncCounter,
-		k8sRequestCounter:       k8sRequestCounter,
-		kubectlExecCounter:      kubectlExecCounter,
-		kubectlExecPendingGauge: kubectlExecPendingGauge,
-		orphanedResourcesGauge:  orphanedResourcesGauge,
-		reconcileHistogram:      reconcileHistogram,
-		clusterEventsCounter:    clusterEventsCounter,
-		redisRequestCounter:     redisRequestCounter,
-		redisRequestHistogram:   redisRequestHistogram,
-		hostname:                hostname,
+		syncCounter:                       syncCounter,
+		k8sRequestCounter:                 k8sRequestCounter,
+		kubectlExecCounter:                kubectlExecCounter,
+		kubectlExecPendingGauge:           kubectlExecPendingGauge,
+		orphanedResourcesGauge:            orphanedResourcesGauge,
+		reconcileHistogram:                reconcileHistogram,
+		clusterEventsCounter:              clusterEventsCounter,
+		redisRequestCounter:               redisRequestCounter,
+		redisRequestHistogram:             redisRequestHistogram,
+		resourceEventsProcessingHistogram: resourceEventsProcessingHistogram,
+		resourceEventsNumberGauge:         resourceEventsNumberGauge,
+		hostname:                          hostname,
 		// This cron is used to expire the metrics cache.
 		// Currently clearing the metrics cache is logging and deleting from the map
 		// so there is no possibility of panic, but we will add a chain to keep robfig/cron v1 behavior.
@@ -284,6 +304,12 @@ func (m *MetricsServer) ObserveRedisRequestDuration(duration time.Duration) {
 	m.redisRequestHistogram.WithLabelValues(m.hostname, common.ApplicationController).Observe(duration.Seconds())
 }

+// ObserveResourceEventsProcessingDuration observes resource events processing duration
+func (m *MetricsServer) ObserveResourceEventsProcessingDuration(server string, duration time.Duration, processedEventsNumber int) {
+	m.resourceEventsProcessingHistogram.WithLabelValues(server).Observe(duration.Seconds())
+	m.resourceEventsNumberGauge.WithLabelValues(server).Set(float64(processedEventsNumber))
+}
+
 // IncReconcile increments the reconcile counter for an application
 func (m *MetricsServer) IncReconcile(app *argoappv1.Application, duration time.Duration) {
 	m.reconcileHistogram.WithLabelValues(app.Namespace, app.Spec.Destination.Server).Observe(duration.Seconds())
@@ -311,6 +337,8 @@ func (m *MetricsServer) SetExpiration(cacheExpiration time.Duration) error {
 		m.redisRequestCounter.Reset()
 		m.reconcileHistogram.Reset()
 		m.redisRequestHistogram.Reset()
+		m.resourceEventsProcessingHistogram.Reset()
+		m.resourceEventsNumberGauge.Reset()
 	})
 	if err != nil {
 		return err
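
To make the intended call pattern concrete, here is a hypothetical batch-processing loop feeding the two new collectors. Only ObserveResourceEventsProcessingDuration comes from this diff; the event type and handler are placeholders:

type clusterEvent struct{} // placeholder for the cluster cache's event type

func handleEvent(ev clusterEvent) {} // placeholder per-event handler

// processEventBatch times one batch and records its size, so operators can
// tune controller.cluster.cache.events.processing.interval against the
// argocd_resource_events_processing histogram.
func processEventBatch(ms *MetricsServer, server string, batch []clusterEvent) {
	start := time.Now()
	for _, ev := range batch {
		handleEvent(ev)
	}
	ms.ObserveResourceEventsProcessingDuration(server, time.Since(start), len(batch))
}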
@@ -71,7 +71,7 @@ type managedResource struct {
 type AppStateManager interface {
 	CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error)
 	SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
-	GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
+	GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
 }

 // comparisonResult holds the state of an application after the reconciliation
@@ -108,6 +108,7 @@ type appStateManager struct {
 	appclientset   appclientset.Interface
 	projInformer   cache.SharedIndexInformer
 	kubectl        kubeutil.Kubectl
+	onKubectlRun   kubeutil.OnKubectlRunFunc
 	repoClientset  apiclient.Clientset
 	liveStateCache statecache.LiveStateCache
 	cache          *appstatecache.Cache
@@ -125,7 +126,7 @@ type appStateManager struct {
 // task to the repo-server. It returns the list of generated manifests as unstructured
 // objects. It also returns the full response from all calls to the repo server as the
 // second argument.
-func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error) {
+func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error) {
 	ts := stats.NewTimingStats()
 	helmRepos, err := m.db.ListHelmRepositories(context.Background())
 	if err != nil {
@@ -168,9 +169,13 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
 	}

 	ts.AddCheckpoint("build_options_ms")
-	serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
-	if err != nil {
-		return nil, nil, false, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err)
+	var serverVersion string
+	var apiResources []kubeutil.APIResourceInfo
+	if sendRuntimeState {
+		serverVersion, apiResources, err = m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
+		if err != nil {
+			return nil, nil, false, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err)
+		}
 	}
 	conn, repoClient, err := m.repoClientset.NewRepoServerClient()
 	if err != nil {
@@ -219,6 +224,12 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp

 		revision := revisions[i]

+		appNamespace := app.Spec.Destination.Namespace
+		apiVersions := argo.APIResourcesToStrings(apiResources, true)
+		if !sendRuntimeState {
+			appNamespace = ""
+		}
+
 		if !source.IsHelm() && syncedRevision != "" && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" {
 			// Validate the manifest-generate-path annotation to avoid generating manifests if it has not changed.
 			updateRevisionResult, err := repoClient.UpdateRevisionForPaths(context.Background(), &apiclient.UpdateRevisionForPathsRequest{
@@ -229,10 +240,10 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
 				Paths:             path.GetAppRefreshPaths(app),
 				AppLabelKey:       appLabelKey,
 				AppName:           app.InstanceName(m.namespace),
-				Namespace:         app.Spec.Destination.Namespace,
+				Namespace:         appNamespace,
 				ApplicationSource: &source,
 				KubeVersion:       serverVersion,
-				ApiVersions:       argo.APIResourcesToStrings(apiResources, true),
+				ApiVersions:       apiVersions,
 				TrackingMethod:    string(argo.GetTrackingMethod(m.settingsMgr)),
 				RefSources:        refSources,
 				HasMultipleSources: app.Spec.HasMultipleSources(),
@@ -263,11 +274,11 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
 			NoRevisionCache:   noRevisionCache,
 			AppLabelKey:       appLabelKey,
 			AppName:           app.InstanceName(m.namespace),
-			Namespace:         app.Spec.Destination.Namespace,
+			Namespace:         appNamespace,
 			ApplicationSource: &source,
 			KustomizeOptions:  kustomizeOptions,
 			KubeVersion:       serverVersion,
-			ApiVersions:       argo.APIResourcesToStrings(apiResources, true),
+			ApiVersions:       apiVersions,
 			VerifySignature:   verifySignature,
 			HelmRepoCreds:     permittedHelmCredentials,
 			TrackingMethod:    string(argo.GetTrackingMethod(m.settingsMgr)),
@@ -309,6 +320,39 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
 	return targetObjs, manifestInfos, revisionUpdated, nil
 }

+// ResolveGitRevision will resolve the given revision to a full commit SHA. Only works for git.
+func (m *appStateManager) ResolveGitRevision(repoURL string, revision string) (string, error) {
+	conn, repoClient, err := m.repoClientset.NewRepoServerClient()
+	if err != nil {
+		return "", fmt.Errorf("failed to connect to repo server: %w", err)
+	}
+	defer io.Close(conn)
+
+	repo, err := m.db.GetRepository(context.Background(), repoURL, "")
+	if err != nil {
+		return "", fmt.Errorf("failed to get repo %q: %w", repoURL, err)
+	}
+
+	// Mock the app. The repo-server only needs to know whether the "chart" field is populated.
+	app := &v1alpha1.Application{
+		Spec: v1alpha1.ApplicationSpec{
+			Source: &v1alpha1.ApplicationSource{
+				RepoURL:        repoURL,
+				TargetRevision: revision,
+			},
+		},
+	}
+	resp, err := repoClient.ResolveRevision(context.Background(), &apiclient.ResolveRevisionRequest{
+		Repo:              repo,
+		App:               app,
+		AmbiguousRevision: revision,
+	})
+	if err != nil {
+		return "", fmt.Errorf("failed to determine whether the dry source has changed: %w", err)
+	}
+	return resp.Revision, nil
+}
+
 func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) {
 	targetObjs := make([]*unstructured.Unstructured, 0)
 	for _, manifest := range manifests {
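
A hedged usage sketch for the new ResolveGitRevision helper: pinning an ambiguous revision such as a branch name to a full commit SHA. The local interface and wrapper function are illustrative assumptions; the diff only adds the method to appStateManager:

// gitRevisionResolver is a hypothetical narrow interface satisfied by
// appStateManager via the method added above.
type gitRevisionResolver interface {
	ResolveGitRevision(repoURL string, revision string) (string, error)
}

// pinRevision resolves a branch or tag to a commit SHA so that every app
// hydrated in the same batch sees an identical dry state.
func pinRevision(r gitRevisionResolver, repoURL, revision string) (string, error) {
	sha, err := r.ResolveGitRevision(repoURL, revision)
	if err != nil {
		return "", fmt.Errorf("could not pin %q in %q: %w", revision, repoURL, err)
	}
	return sha, nil
}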
@@ -437,24 +481,23 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1

 	// return unknown comparison result if basic comparison settings cannot be loaded
 	if err != nil {
-		now := metav1.Now()
 		if hasMultipleSources {
 			return &comparisonResult{
 				syncStatus: &v1alpha1.SyncStatus{
-					ComparedTo: v1alpha1.ComparedTo{Destination: app.Spec.Destination, Sources: sources, IgnoreDifferences: app.Spec.IgnoreDifferences},
+					ComparedTo: app.Spec.BuildComparedToStatus(),
 					Status:     v1alpha1.SyncStatusCodeUnknown,
 					Revisions:  revisions,
 				},
-				healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown, LastTransitionTime: &now},
+				healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},
 			}, nil
 		} else {
 			return &comparisonResult{
 				syncStatus: &v1alpha1.SyncStatus{
-					ComparedTo: v1alpha1.ComparedTo{Source: sources[0], Destination: app.Spec.Destination, IgnoreDifferences: app.Spec.IgnoreDifferences},
+					ComparedTo: app.Spec.BuildComparedToStatus(),
 					Status:     v1alpha1.SyncStatusCodeUnknown,
 					Revision:   revisions[0],
 				},
-				healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown, LastTransitionTime: &now},
+				healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},
 			}, nil
 		}
 	}
@@ -490,7 +533,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
 		}
 	}

-	targetObjs, manifestInfos, revisionUpdated, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, rollback)
+	targetObjs, manifestInfos, revisionUpdated, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, rollback, true)
 	if err != nil {
 		targetObjs = make([]*unstructured.Unstructured, 0)
 		msg := fmt.Sprintf("Failed to load target state: %s", err.Error())
@@ -698,13 +741,13 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
 	diffConfigBuilder.WithServerSideDiff(serverSideDiff)

 	if serverSideDiff {
-		resourceOps, cleanup, err := m.getResourceOperations(app.Spec.Destination.Server)
+		applier, cleanup, err := m.getServerSideDiffDryRunApplier(app.Spec.Destination.Server)
 		if err != nil {
-			log.Errorf("CompareAppState error getting resource operations: %s", err)
+			log.Errorf("CompareAppState error getting server side diff dry run applier: %s", err)
 			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})
 		}
 		defer cleanup()
-		diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(resourceOps))
+		diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(applier))
 	}

 	// enable structured merge diff if application syncs with server-side apply
@@ -1029,6 +1072,7 @@ func NewAppStateManager(
 	repoClientset apiclient.Clientset,
 	namespace string,
 	kubectl kubeutil.Kubectl,
+	onKubectlRun kubeutil.OnKubectlRunFunc,
 	settingsMgr *settings.SettingsManager,
 	liveStateCache statecache.LiveStateCache,
 	projInformer cache.SharedIndexInformer,
@@ -1047,6 +1091,7 @@ func NewAppStateManager(
 		db:             db,
 		appclientset:   appclientset,
 		kubectl:        kubectl,
+		onKubectlRun:   onKubectlRun,
 		repoClientset:  repoClientset,
 		namespace:      namespace,
 		settingsMgr:    settingsMgr,

@@ -715,7 +715,7 @@ func TestSetHealth(t *testing.T) {
 	require.NoError(t, err)

 	assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
-	assert.False(t, compRes.healthStatus.LastTransitionTime.IsZero())
+	assert.Equal(t, app.Status.Health.LastTransitionTime, compRes.healthStatus.LastTransitionTime)
 }

 func TestPreserveStatusTimestamp(t *testing.T) {
@@ -790,7 +790,7 @@ func TestSetHealthSelfReferencedApp(t *testing.T) {
 	require.NoError(t, err)

 	assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
-	assert.False(t, compRes.healthStatus.LastTransitionTime.IsZero())
+	assert.Equal(t, app.Status.Health.LastTransitionTime, compRes.healthStatus.LastTransitionTime)
 }

 func TestSetManagedResourcesWithOrphanedResources(t *testing.T) {
@@ -866,7 +866,7 @@ func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) {
 	require.NoError(t, err)

 	assert.Equal(t, health.HealthStatusUnknown, compRes.healthStatus.Status)
-	assert.False(t, compRes.healthStatus.LastTransitionTime.IsZero())
+	assert.Equal(t, app.Status.Health.LastTransitionTime, compRes.healthStatus.LastTransitionTime)
 	assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
 }

@@ -14,6 +14,7 @@ import (

 	cdcommon "github.com/argoproj/argo-cd/v2/common"

+	gitopsDiff "github.com/argoproj/gitops-engine/pkg/diff"
 	"github.com/argoproj/gitops-engine/pkg/sync"
 	"github.com/argoproj/gitops-engine/pkg/sync/common"
 	"github.com/argoproj/gitops-engine/pkg/utils/kube"
@@ -33,6 +34,7 @@ import (
 	"github.com/argoproj/argo-cd/v2/util/argo"
 	"github.com/argoproj/argo-cd/v2/util/argo/diff"
 	"github.com/argoproj/argo-cd/v2/util/glob"
+	kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
 	logutils "github.com/argoproj/argo-cd/v2/util/log"
 	"github.com/argoproj/argo-cd/v2/util/lua"
 	"github.com/argoproj/argo-cd/v2/util/rand"
@@ -66,11 +68,11 @@ func (m *appStateManager) getGVKParser(server string) (*managedfields.GvkParser,
 	return cluster.GetGVKParser(), nil
 }

-// getResourceOperations will return the kubectl implementation of the ResourceOperations
-// interface that provides functionality to manage kubernetes resources. Returns a
+// getServerSideDiffDryRunApplier will return the kubectl implementation of the KubeApplier
+// interface that provides functionality to dry run apply kubernetes resources. Returns a
 // cleanup function that must be called to remove the generated kube config for this
 // server.
-func (m *appStateManager) getResourceOperations(server string) (kube.ResourceOperations, func(), error) {
+func (m *appStateManager) getServerSideDiffDryRunApplier(server string) (gitopsDiff.KubeApplier, func(), error) {
 	clusterCache, err := m.liveStateCache.GetClusterCache(server)
 	if err != nil {
 		return nil, nil, fmt.Errorf("error getting cluster cache: %w", err)
@@ -85,7 +87,7 @@ func (m *appStateManager) getResourceOperations(server string) (kube.ResourceOpe
 	if err != nil {
 		return nil, nil, fmt.Errorf("error getting cluster REST config: %w", err)
 	}
-	ops, cleanup, err := m.kubectl.ManageResources(rawConfig, clusterCache.GetOpenAPISchema())
+	ops, cleanup, err := kubeutil.ManageServerSideDiffDryRuns(rawConfig, clusterCache.GetOpenAPISchema(), m.onKubectlRun)
 	if err != nil {
 		return nil, nil, fmt.Errorf("error creating kubectl ResourceOperations: %w", err)
 	}
@@ -112,15 +114,6 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
 	}
 	syncOp = *state.Operation.Sync

-	// validates if it should fail the sync if it finds shared resources
-	hasSharedResource, sharedResourceMessage := hasSharedResourceCondition(app)
-	if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") &&
-		hasSharedResource {
-		state.Phase = common.OperationFailed
-		state.Message = fmt.Sprintf("Shared resource found: %s", sharedResourceMessage)
-		return
-	}
-
 	isMultiSourceRevision := app.Spec.HasMultipleSources()
 	rollback := len(syncOp.Sources) > 0 || syncOp.Source != nil
 	if rollback {
@@ -211,6 +204,15 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
 	syncRes.Revision = compareResult.syncStatus.Revision
 	syncRes.Revisions = compareResult.syncStatus.Revisions

+	// validates if it should fail the sync if it finds shared resources
+	hasSharedResource, sharedResourceMessage := hasSharedResourceCondition(app)
+	if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") &&
+		hasSharedResource {
+		state.Phase = common.OperationFailed
+		state.Message = fmt.Sprintf("Shared resource found: %s", sharedResourceMessage)
+		return
+	}
+
 	// If there are any comparison or spec errors error conditions do not perform the operation
 	if errConditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{
 		v1alpha1.ApplicationConditionComparisonError: true,

@@ -15,6 +15,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"

+	argocommon "github.com/argoproj/argo-cd/v2/common"
 	"github.com/argoproj/argo-cd/v2/controller/testdata"
 	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
 	"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
@@ -56,7 +57,7 @@ func TestPersistRevisionHistory(t *testing.T) {

 	updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, v1.GetOptions{})
 	require.NoError(t, err)
-	assert.Len(t, updatedApp.Status.History, 1)
+	require.Len(t, updatedApp.Status.History, 1)
 	assert.Equal(t, app.Spec.GetSource(), updatedApp.Status.History[0].Source)
 	assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
 }
@@ -190,17 +191,23 @@ func TestSyncComparisonError(t *testing.T) {
 }

 func TestAppStateManager_SyncAppState(t *testing.T) {
 	t.Parallel()

 	type fixture struct {
-		project     *v1alpha1.AppProject
 		application *v1alpha1.Application
+		project     *v1alpha1.AppProject
 		controller  *ApplicationController
 	}

-	setup := func() *fixture {
+	setup := func(liveObjects map[kube.ResourceKey]*unstructured.Unstructured) *fixture {
 		app := newFakeApp()
 		app.Status.OperationState = nil
 		app.Status.History = nil

+		if liveObjects == nil {
+			liveObjects = make(map[kube.ResourceKey]*unstructured.Unstructured)
+		}
+
 		project := &v1alpha1.AppProject{
 			ObjectMeta: v1.ObjectMeta{
 				Namespace: test.FakeArgoCDNamespace,
@@ -208,6 +215,12 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
 			},
 			Spec: v1alpha1.AppProjectSpec{
 				SignatureKeys: []v1alpha1.SignatureKey{{KeyID: "test"}},
+				Destinations: []v1alpha1.ApplicationDestination{
+					{
+						Namespace: "*",
+						Server:    "*",
+					},
+				},
 			},
 		}
 		data := fakeData{
@@ -218,13 +231,13 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
 				Server:   test.FakeClusterURL,
 				Revision: "abc123",
 			},
-			managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
+			managedLiveObjs: liveObjects,
 		}
 		ctrl := newFakeController(&data, nil)

 		return &fixture{
-			project:     project,
 			application: app,
+			project:     project,
 			controller:  ctrl,
 		}
 	}
@@ -232,13 +245,23 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
 	t.Run("will fail the sync if finds shared resources", func(t *testing.T) {
 		// given
 		t.Parallel()
-		f := setup()
-		syncErrorMsg := "deployment already applied by another application"
-		condition := v1alpha1.ApplicationCondition{
-			Type:    v1alpha1.ApplicationConditionSharedResourceWarning,
-			Message: syncErrorMsg,
-		}
-		f.application.Status.Conditions = append(f.application.Status.Conditions, condition)
+		sharedObject := kube.MustToUnstructured(&corev1.ConfigMap{
+			TypeMeta: v1.TypeMeta{
+				APIVersion: "v1",
+				Kind:       "ConfigMap",
+			},
+			ObjectMeta: v1.ObjectMeta{
+				Name:      "configmap1",
+				Namespace: "default",
+				Labels: map[string]string{
+					argocommon.LabelKeyAppInstance: "another-app",
+				},
+			},
+		})
+		liveObjects := make(map[kube.ResourceKey]*unstructured.Unstructured)
+		liveObjects[kube.GetResourceKey(sharedObject)] = sharedObject
+		f := setup(liveObjects)

 		// Sync with source unspecified
 		opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
@@ -253,7 +276,7 @@ func TestAppStateManager_SyncAppState(t *testing.T) {

 		// then
 		assert.Equal(t, common.OperationFailed, opState.Phase)
-		assert.Contains(t, opState.Message, syncErrorMsg)
+		assert.Contains(t, opState.Message, "ConfigMap/configmap1 is part of applications fake-argocd-ns/my-app and another-app")
 	})
 }

controller/utils/log.go (new file, 17 lines)
@@ -0,0 +1,17 @@
package utils

import (
	"github.com/sirupsen/logrus"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// GetAppLog returns a logrus entry with fields set for the given application.
func GetAppLog(app *v1alpha1.Application) *logrus.Entry {
	return logrus.WithFields(logrus.Fields{
		"application":        app.Name,
		"app-namespace":      app.Namespace,
		"app-qualified-name": app.QualifiedName(),
		"project":            app.Spec.Project,
	})
}
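
A quick usage sketch; the calling function and log message are hypothetical, only GetAppLog itself comes from this file:

func reconcileApp(app *v1alpha1.Application) {
	// Every line logged through logCtx carries the app name, namespace,
	// qualified name, and project, which keeps multi-app logs greppable.
	logCtx := utils.GetAppLog(app)
	logCtx.Info("starting reconciliation") // hypothetical message
}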
@@ -184,12 +184,11 @@ the argocd-secret with key 'some.argocd.secret.key'.

 If provided, and multiple services are configured, will have to match
 the application destination name or server to have requests properly
 forwarded to this service URL. If there are multiple backends for the
-same extension this field is required. In this case at least one of
-the two will be required: name or server. It is better to provide both
-values to avoid problems with applications unable to send requests to
-the proper backend service. If only one backend service is
-configured, this field is ignored, and all requests are forwarded to
-the configured one.
+same extension this field is required. In this case, it is necessary
+to provide both values to avoid problems with applications unable to
+send requests to the proper backend service. If only one backend
+service is configured, this field is ignored, and all requests are
+forwarded to the configured one.

 #### `extensions.backend.services.cluster.name` (*string*)

 (optional)
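
To illustrate the matching rule described above, here is a hypothetical configuration with two backends for one extension, one matched by cluster name and the other by server URL. The YAML shape is assumed from this section rather than quoted from the reference documentation:

extensions:
- name: some-extension
  backend:
    services:
    # Requests from apps destined to the cluster named "in-cluster" go here.
    - url: http://extension-backend-1.example.com
      cluster:
        name: in-cluster
    # Requests from apps destined to this server URL go here.
    - url: http://extension-backend-2.example.com
      cluster:
        server: https://my-cluster.example.com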
@@ -9,6 +9,9 @@ data:
   # Repo server address. (default "argocd-repo-server:8081")
   repo.server: "argocd-repo-server:8081"

+  # Commit server address. (default "argocd-commit-server:8086")
+  commit.server: "argocd-commit-server:8086"
+
   # Redis server hostname and port (e.g. argocd-redis:6379)
   redis.server: "argocd-redis:6379"
   # Enable compression for data sent to Redis with the required compression algorithm. (default 'gzip')
@@ -16,6 +19,9 @@ data:
   # Redis database
   redis.db:

+  # Enables the alpha "manifest hydrator" feature. (default "false")
+  hydrator.enabled: "false"
+
   # Open-Telemetry collector address: (e.g. "otel-collector:4317")
   otlp.address: ""
   # Open-Telemetry collector insecure: (e.g. "true")
@@ -79,6 +85,13 @@ data:
   controller.diff.server.side: "false"
   # Enables profile endpoint on the internal metrics port
   controller.profile.enabled: "false"
+  # Enables batch-processing mode in the controller's cluster cache. This can help improve performance for clusters that
+  # have high "churn," i.e. lots of resource modifications.
+  controller.cluster.cache.batch.events.processing: "false"
+  # This sets the interval at which the controller's cluster cache processes a batch of cluster events. A lower value
+  # will increase the speed at which Argo CD becomes aware of external cluster state. A higher value will reduce cluster
+  # cache lock contention and better handle high-churn clusters.
+  controller.cluster.cache.events.processing.interval: "100ms"

   ## Server properties
   # Listen on given address for incoming connections (default "0.0.0.0")
@@ -195,6 +208,15 @@ data:
   # Include hidden directories from Git
   reposerver.include.hidden.directories: "false"

+  ## Commit-server properties
+  # Listen on given address for incoming connections (default "0.0.0.0")
+  commitserver.listen.address: "0.0.0.0"
+  # Set the logging format. One of: text|json (default "text")
+  commitserver.log.format: "text"
+  # Set the logging level. One of: debug|info|warn|error (default "info")
+  commitserver.log.level: "info"
+  # Listen on given address for metrics (default "0.0.0.0")
+  commitserver.metrics.listen.address: "0.0.0.0"
+
   # Set the logging format. One of: text|json (default "text")
   dexserver.log.format: "text"
@@ -250,6 +272,8 @@ data:
   applicationsetcontroller.requeue.after: "3m"
   # Enable strict mode for tokenRef in ApplicationSet resources. When enabled, the referenced secret must have a label `argocd.argoproj.io/secret-type` with value `scm-creds`.
   applicationsetcontroller.enable.tokenref.strict.mode: "false"
+  # The maximum number of resources stored in the status of an ApplicationSet. This is a safeguard to prevent the status from growing too large.
+  applicationsetcontroller.status.max.resources.count: "5000"

   ## Argo CD Notifications Controller Properties
   # Set the logging level. One of: debug|info|warn|error (default "info")
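
As a worked example, enabling the alpha hydrator means flipping the key shown above. A minimal argocd-cmd-params-cm overlay might look like this; the namespace and the choice to also pin the commit-server address are assumptions:

apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
  namespace: argocd
data:
  # Turn on the alpha "manifest hydrator" feature (default "false").
  hydrator.enabled: "true"
  # Address of the commit server the hydrator writes through (default shown above).
  commit.server: "argocd-commit-server:8086"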
@@ -15,7 +15,7 @@ to indicate their stability and maturity. These are the statuses of non-stable f
 ## Overview

 | Feature                                   | Introduced | Status |
-| ----------------------------------------- | ---------- | ------ |
+|-------------------------------------------|------------|--------|
 | [AppSet Progressive Syncs][2]             | v2.6.0     | Alpha  |
 | [Proxy Extensions][3]                     | v2.7.0     | Alpha  |
 | [Skip Application Reconcile][4]           | v2.7.0     | Alpha  |
@@ -25,6 +25,7 @@ to indicate their stability and maturity. These are the statuses of non-stable f
 | [Server Side Diff][8]                     | v2.10.0    | Beta   |
 | [Cluster Sharding: consistent-hashing][9] | v2.12.0    | Alpha  |
 | [Service Account Impersonation][10]       | v2.13.0    | Alpha  |
+| [Source Hydrator][11]                     | v2.14.0    | Alpha  |

 ## Unstable Configurations

@@ -83,3 +84,4 @@ to indicate their stability and maturity. These are the statuses of non-stable f
 [8]: ../user-guide/diff-strategies.md#server-side-diff
 [9]: ./high_availability.md#argocd-application-controller
 [10]: app-sync-using-impersonation.md
+[11]: ../user-guide/source-hydrator.md