mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-03-05 16:08:49 +01:00
Compare commits
295 Commits
dependabot
...
dependabot
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7f9c875ee3 | ||
|
|
808751b6d0 | ||
|
|
87faf58733 | ||
|
|
1db5f2e618 | ||
|
|
d269e6f936 | ||
|
|
e6a7c1d4e2 | ||
|
|
91b8bba570 | ||
|
|
29805b0e8f | ||
|
|
69f24f007b | ||
|
|
7168674403 | ||
|
|
24d4cb57c5 | ||
|
|
d135f73160 | ||
|
|
37b0f0f767 | ||
|
|
8c3b78ef88 | ||
|
|
68e5a4a12c | ||
|
|
d154627681 | ||
|
|
e85e353b81 | ||
|
|
c39fde74f0 | ||
|
|
08cd547750 | ||
|
|
4362e8ccb7 | ||
|
|
a06dfeb832 | ||
|
|
b20fd4342f | ||
|
|
1c5d7f1f65 | ||
|
|
49f3c05d7d | ||
|
|
9bc35de19d | ||
|
|
a4919edffb | ||
|
|
9e804f99f0 | ||
|
|
ef8d03cea5 | ||
|
|
6265da106e | ||
|
|
b1b157068e | ||
|
|
7129a2c147 | ||
|
|
6c38186f7f | ||
|
|
d3bdc9d5f3 | ||
|
|
728262ac55 | ||
|
|
928aee5dff | ||
|
|
5ce60ca6e3 | ||
|
|
88fccc91c6 | ||
|
|
7a2dc7e80f | ||
|
|
8657798324 | ||
|
|
98f2760d50 | ||
|
|
7ed0f2300e | ||
|
|
5d5d17ae35 | ||
|
|
bfe8b30d9a | ||
|
|
65a082b12c | ||
|
|
14a22ad926 | ||
|
|
70c8f4612f | ||
|
|
b1a9fab70c | ||
|
|
1e5761c1d0 | ||
|
|
8c8902b93f | ||
|
|
13c47ee244 | ||
|
|
82391027d9 | ||
|
|
0c82f4079b | ||
|
|
97af89a3b3 | ||
|
|
d737f8fe43 | ||
|
|
57cccb65c2 | ||
|
|
9bca4859e0 | ||
|
|
940a489cfa | ||
|
|
7dae82dfd3 | ||
|
|
0984b03805 | ||
|
|
b74c0a0e1a | ||
|
|
eaef25c3eb | ||
|
|
a8cae97da0 | ||
|
|
b2b6d9822b | ||
|
|
da7f11a826 | ||
|
|
2b1f5959bd | ||
|
|
5e2a8a86d0 | ||
|
|
d3de4435ce | ||
|
|
5510bdfd71 | ||
|
|
c67763b069 | ||
|
|
1d6ba890a8 | ||
|
|
2e90919fe6 | ||
|
|
34bc56352c | ||
|
|
e039293b7e | ||
|
|
2a0eac0ca9 | ||
|
|
6a2077642e | ||
|
|
24b0ecc657 | ||
|
|
d7364b4662 | ||
|
|
f78cddf736 | ||
|
|
45a7a18256 | ||
|
|
04d1ca4733 | ||
|
|
6d9b5bdf53 | ||
|
|
90123bac04 | ||
|
|
37b67fa4a5 | ||
|
|
48faed19f1 | ||
|
|
0e42012778 | ||
|
|
1b3ced9261 | ||
|
|
ad2e4450f2 | ||
|
|
90e2148667 | ||
|
|
2558e80f41 | ||
|
|
3c6449da89 | ||
|
|
e5417e1eb3 | ||
|
|
69f7d39717 | ||
|
|
390ea4ff54 | ||
|
|
7e868da310 | ||
|
|
03ac864dde | ||
|
|
27b70cf56e | ||
|
|
8ed3a24d49 | ||
|
|
9a990b7e89 | ||
|
|
ec80ebdf67 | ||
|
|
4dfab5d136 | ||
|
|
8f23c885b6 | ||
|
|
48a7030125 | ||
|
|
38ad19fd95 | ||
|
|
24c08922e5 | ||
|
|
323f993816 | ||
|
|
6ec53193fd | ||
|
|
559744a65e | ||
|
|
3f03097983 | ||
|
|
9928c906a2 | ||
|
|
99710b5183 | ||
|
|
42d4cfb857 | ||
|
|
751550562c | ||
|
|
72d054d772 | ||
|
|
f3dbc6f9de | ||
|
|
16cc1b15af | ||
|
|
aff3ae3f4d | ||
|
|
b8decb798a | ||
|
|
4393f7deb8 | ||
|
|
4024fe7c22 | ||
|
|
678f61b8d3 | ||
|
|
31e0f428e8 | ||
|
|
998253aa41 | ||
|
|
69d1d88807 | ||
|
|
58b0116d75 | ||
|
|
b9daeac44e | ||
|
|
a78a616566 | ||
|
|
d2b881ae4a | ||
|
|
796f72c3d4 | ||
|
|
7da3ecc08f | ||
|
|
fca42e3fd4 | ||
|
|
7f5072f286 | ||
|
|
fe6aaad4f0 | ||
|
|
90eae48c77 | ||
|
|
9895f55781 | ||
|
|
660295f656 | ||
|
|
be2c243ac8 | ||
|
|
8eac64d54c | ||
|
|
c68ec277d4 | ||
|
|
51d88197d7 | ||
|
|
4e63bc7563 | ||
|
|
19415979e8 | ||
|
|
5ac055d2a2 | ||
|
|
dcf1965c52 | ||
|
|
0a1572b9d9 | ||
|
|
853b8dddd3 | ||
|
|
9fffcd50d3 | ||
|
|
835c1fbd3c | ||
|
|
2ed67e8fac | ||
|
|
dd1547fcb4 | ||
|
|
635b9fe8fb | ||
|
|
195b238a37 | ||
|
|
cb61611816 | ||
|
|
a37a4d4073 | ||
|
|
2de6819422 | ||
|
|
df3a45ac02 | ||
|
|
f8aea44398 | ||
|
|
02de363d9c | ||
|
|
79943d8189 | ||
|
|
36f1a59c09 | ||
|
|
d5383de5c5 | ||
|
|
9cc960d07d | ||
|
|
fd78d66f4d | ||
|
|
3e6f11e08e | ||
|
|
e5b83f1d1b | ||
|
|
60adba2d5f | ||
|
|
be37e0aa3d | ||
|
|
ce35b4c484 | ||
|
|
01d00ac952 | ||
|
|
7f5ef5c087 | ||
|
|
1c9bb478e8 | ||
|
|
bc49329691 | ||
|
|
6747cfa28d | ||
|
|
5ee35ad707 | ||
|
|
908c73255e | ||
|
|
b090ee70a8 | ||
|
|
3eb442ed82 | ||
|
|
a5c6898655 | ||
|
|
ac4ae1779e | ||
|
|
d83ef2c224 | ||
|
|
9dfa9db097 | ||
|
|
20e3877633 | ||
|
|
8e00df5326 | ||
|
|
6b6512ae30 | ||
|
|
262c8151ae | ||
|
|
1bc9adb134 | ||
|
|
733350ce7c | ||
|
|
a74d8996b7 | ||
|
|
4e72dd7c55 | ||
|
|
2c4dd51e15 | ||
|
|
1e2a66d5b2 | ||
|
|
ffc3b1a11d | ||
|
|
54e2648b3f | ||
|
|
aa5d1395bc | ||
|
|
4e69156e18 | ||
|
|
9aff762531 | ||
|
|
0cfc2fd861 | ||
|
|
88ce38e450 | ||
|
|
5bd2d0d917 | ||
|
|
ebff248ba8 | ||
|
|
6f1e27e93c | ||
|
|
965c83e016 | ||
|
|
276d92d4e0 | ||
|
|
79f152c1ba | ||
|
|
a3eb4e722e | ||
|
|
3349949835 | ||
|
|
562194b35c | ||
|
|
56f8797a2b | ||
|
|
313e8bf70f | ||
|
|
786b24e2c4 | ||
|
|
69b1f0a33c | ||
|
|
e452870b0e | ||
|
|
e8e39a996e | ||
|
|
6ead52c21c | ||
|
|
9e25f93e03 | ||
|
|
3f44b85a77 | ||
|
|
ff019243a1 | ||
|
|
cd11e44d8b | ||
|
|
f420cce7a5 | ||
|
|
d39c0083ea | ||
|
|
4cd4e5e74e | ||
|
|
2e4af5fa5a | ||
|
|
e692a22b01 | ||
|
|
abbdfa26fd | ||
|
|
36345afeb2 | ||
|
|
bf035b3cb4 | ||
|
|
d58ba040e9 | ||
|
|
09b5cbdda2 | ||
|
|
c012702ce0 | ||
|
|
fb94cad141 | ||
|
|
c94874fd18 | ||
|
|
a90c54599b | ||
|
|
3b1ac4b22d | ||
|
|
0864f1ac95 | ||
|
|
671107cb10 | ||
|
|
5c2b13f07c | ||
|
|
5ca752429e | ||
|
|
1fbd63d095 | ||
|
|
07bd5e0f9e | ||
|
|
be042c4474 | ||
|
|
346a749cde | ||
|
|
04794332d2 | ||
|
|
39b9e4f8c5 | ||
|
|
0f822ff801 | ||
|
|
4d16fdcea4 | ||
|
|
c60a727524 | ||
|
|
6ec1aa1b84 | ||
|
|
a6a78ef8d6 | ||
|
|
99fea7c12e | ||
|
|
0c1eb30b4d | ||
|
|
ca6e205332 | ||
|
|
5107ec1ce3 | ||
|
|
3401d3bf92 | ||
|
|
0a0176f4fd | ||
|
|
d6ecc66216 | ||
|
|
203e07c9a4 | ||
|
|
29df864ae1 | ||
|
|
7d0820f5ca | ||
|
|
8d47727d38 | ||
|
|
3df2883a4d | ||
|
|
54b3c95e84 | ||
|
|
b8ac5ef635 | ||
|
|
986e1f8589 | ||
|
|
ea31d17f53 | ||
|
|
9567183b7c | ||
|
|
460111f7bc | ||
|
|
ac49c67403 | ||
|
|
e9811678fa | ||
|
|
061c1fc7c5 | ||
|
|
e37c3dbd40 | ||
|
|
a1bcd4246e | ||
|
|
4501ebb93f | ||
|
|
e0f4b00126 | ||
|
|
d518f13b2a | ||
|
|
c880373aae | ||
|
|
b0336b8f79 | ||
|
|
9fd0601e52 | ||
|
|
83d553ca51 | ||
|
|
d43fbe6148 | ||
|
|
1b48f363bb | ||
|
|
614c85cb72 | ||
|
|
414d9eb5db | ||
|
|
5c9a5ef9a6 | ||
|
|
d1113970cd | ||
|
|
58d82bedb8 | ||
|
|
4dd9bc7642 | ||
|
|
7f3709374b | ||
|
|
7922c77991 | ||
|
|
20f9081fb4 | ||
|
|
1d09c8c8a1 | ||
|
|
bee23628a8 | ||
|
|
f03ffb3592 | ||
|
|
d4ebcc0c15 | ||
|
|
a671cc9b23 | ||
|
|
49514c9b4c | ||
|
|
f7590fa302 |
15
.github/configs/renovate-config.js
vendored
Normal file
15
.github/configs/renovate-config.js
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
module.exports = {
|
||||
platform: 'github',
|
||||
gitAuthor: 'renovate[bot] <renovate[bot]@users.noreply.github.com>',
|
||||
autodiscover: false,
|
||||
allowPostUpgradeCommandTemplating: true,
|
||||
allowedPostUpgradeCommands: ["make mockgen"],
|
||||
extends: [
|
||||
"github>argoproj/argo-cd//renovate-presets/commons.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/custom-managers/shell.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/custom-managers/yaml.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/fix/disable-all-updates.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/devtool.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/docs.json5"
|
||||
]
|
||||
}
|
||||
2
.github/pull_request_template.md
vendored
2
.github/pull_request_template.md
vendored
@@ -8,7 +8,7 @@ Checklist:
|
||||
|
||||
* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.
|
||||
* [ ] The title of the PR states what changed and the related issues number (used for the release note).
|
||||
* [ ] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/toolchain-guide/#title-of-the-pr)
|
||||
* [ ] The title of the PR conforms to the [Title of the PR](https://argo-cd.readthedocs.io/en/latest/developer-guide/submit-your-pr/#title-of-the-pr)
|
||||
* [ ] I've included "Closes [ISSUE #]" or "Fixes [ISSUE #]" in the description to automatically close the associated issue.
|
||||
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
|
||||
* [ ] Does this PR require documentation updates?
|
||||
|
||||
26
.github/workflows/ci-build.yaml
vendored
26
.github/workflows/ci-build.yaml
vendored
@@ -14,7 +14,7 @@ on:
|
||||
env:
|
||||
# Golang version to use across CI steps
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.24.4'
|
||||
GOLANG_VERSION: '1.25.0'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -82,7 +82,7 @@ jobs:
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -112,7 +112,7 @@ jobs:
|
||||
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
|
||||
with:
|
||||
# renovate: datasource=go packageName=github.com/golangci/golangci-lint versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
|
||||
version: v2.1.6
|
||||
version: v2.4.0
|
||||
args: --verbose
|
||||
|
||||
test-go:
|
||||
@@ -153,7 +153,7 @@ jobs:
|
||||
run: |
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -217,7 +217,7 @@ jobs:
|
||||
run: |
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -311,7 +311,7 @@ jobs:
|
||||
node-version: '22.9.0'
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ui/node_modules
|
||||
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
|
||||
@@ -339,7 +339,7 @@ jobs:
|
||||
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- run: |
|
||||
sudo apt-get install shellcheck
|
||||
shellcheck -e SC2086 -e SC2046 -e SC2068 -e SC2206 -e SC2048 -e SC2059 -e SC2154 -e SC2034 -e SC2016 -e SC2128 -e SC1091 -e SC2207 $(find . -type f -name '*.sh') | tee sc.log
|
||||
shellcheck -e SC2059 -e SC2154 -e SC2034 -e SC2016 -e SC1091 $(find . -type f -name '*.sh' | grep -v './ui/node_modules') | tee sc.log
|
||||
test ! -s sc.log
|
||||
|
||||
analyze:
|
||||
@@ -360,7 +360,7 @@ jobs:
|
||||
fetch-depth: 0
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ui/node_modules
|
||||
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
|
||||
@@ -368,12 +368,12 @@ jobs:
|
||||
run: |
|
||||
rm -rf ui/node_modules/argo-ui/node_modules
|
||||
- name: Get e2e code coverage
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
with:
|
||||
name: e2e-code-coverage
|
||||
path: e2e-code-coverage
|
||||
- name: Get unit test code coverage
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
with:
|
||||
name: test-results
|
||||
path: test-results
|
||||
@@ -385,7 +385,7 @@ jobs:
|
||||
run: |
|
||||
go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller,e2e-code-coverage/commit-server -o test-results/full-coverage.out
|
||||
- name: Upload code coverage information to codecov.io
|
||||
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
|
||||
uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00 # v5.5.0
|
||||
with:
|
||||
files: test-results/full-coverage.out
|
||||
fail_ci_if_error: true
|
||||
@@ -402,7 +402,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
|
||||
uses: SonarSource/sonarqube-scan-action@2500896589ef8f7247069a56136f8dc177c27ccf # v5.2.0
|
||||
uses: SonarSource/sonarqube-scan-action@8c71dc039c2dd71d3821e89a2b58ecc7fee6ced9 # v5.3.0
|
||||
if: env.sonar_secret != ''
|
||||
test-e2e:
|
||||
name: Run end-to-end tests
|
||||
@@ -468,7 +468,7 @@ jobs:
|
||||
sudo chmod go-r $HOME/.kube/config
|
||||
kubectl version
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
|
||||
10
.github/workflows/image-reuse.yaml
vendored
10
.github/workflows/image-reuse.yaml
vendored
@@ -73,10 +73,10 @@ jobs:
|
||||
cache: false
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
|
||||
uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # v3.9.2
|
||||
|
||||
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
- uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Setup tags for container image as a CSV type
|
||||
run: |
|
||||
@@ -103,7 +103,7 @@ jobs:
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
|
||||
- name: Login to Quay.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.quay_username }}
|
||||
@@ -111,7 +111,7 @@ jobs:
|
||||
if: ${{ inputs.quay_image_name && inputs.push }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ secrets.ghcr_username }}
|
||||
@@ -119,7 +119,7 @@ jobs:
|
||||
if: ${{ inputs.ghcr_image_name && inputs.push }}
|
||||
|
||||
- name: Login to dockerhub Container Registry
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
username: ${{ secrets.docker_username }}
|
||||
password: ${{ secrets.docker_password }}
|
||||
|
||||
4
.github/workflows/image.yaml
vendored
4
.github/workflows/image.yaml
vendored
@@ -53,7 +53,7 @@ jobs:
|
||||
with:
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.24.4
|
||||
go-version: 1.25.0
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: false
|
||||
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.24.4
|
||||
go-version: 1.25.0
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: true
|
||||
secrets:
|
||||
|
||||
6
.github/workflows/release.yaml
vendored
6
.github/workflows/release.yaml
vendored
@@ -11,7 +11,7 @@ permissions: {}
|
||||
|
||||
env:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.24.4' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.25.0' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
@@ -25,7 +25,7 @@ jobs:
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.24.4
|
||||
go-version: 1.25.0
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
@@ -96,7 +96,7 @@ jobs:
|
||||
tool-cache: false
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
|
||||
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
|
||||
id: run-goreleaser
|
||||
with:
|
||||
version: latest
|
||||
|
||||
31
.github/workflows/renovate.yaml
vendored
Normal file
31
.github/workflows/renovate.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: Renovate
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 * * * *'
|
||||
workflow_dispatch: {}
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
renovate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Get token
|
||||
id: get_token
|
||||
uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
|
||||
with:
|
||||
app-id: ${{ vars.RENOVATE_APP_ID }}
|
||||
private-key: ${{ secrets.RENOVATE_APP_PRIVATE_KEY }}
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
|
||||
|
||||
- name: Self-hosted Renovate
|
||||
uses: renovatebot/github-action@b11417b9eaac3145fe9a8544cee66503724e32b6 #43.0.8
|
||||
with:
|
||||
configurationFile: .github/configs/renovate-config.js
|
||||
token: '${{ steps.get_token.outputs.token }}'
|
||||
env:
|
||||
LOG_LEVEL: 'debug'
|
||||
RENOVATE_REPOSITORIES: '${{ github.repository }}'
|
||||
@@ -58,7 +58,6 @@ linters:
|
||||
- commentedOutCode
|
||||
- deferInLoop
|
||||
- exitAfterDefer
|
||||
- exposedSyncMutex
|
||||
- hugeParam
|
||||
- importShadow
|
||||
- paramTypeCombine # Leave disabled, there are too many failures to be worth fixing.
|
||||
|
||||
@@ -21,7 +21,7 @@ builds:
|
||||
- -X github.com/argoproj/argo-cd/v3/common.gitCommit={{ .FullCommit }}
|
||||
- -X github.com/argoproj/argo-cd/v3/common.gitTreeState={{ .Env.GIT_TREE_STATE }}
|
||||
- -X github.com/argoproj/argo-cd/v3/common.kubectlVersion={{ .Env.KUBECTL_VERSION }}
|
||||
- '{{ if or (eq .Runtime.Goos "linux") (eq .Runtime.Goos "windows") }}-extldflags="-static"{{ end }}'
|
||||
- -extldflags="-static"
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
@@ -42,15 +42,6 @@ builds:
|
||||
goarch: ppc64le
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
overrides:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- goos: darwin
|
||||
goarch: arm64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
|
||||
archives:
|
||||
- id: argocd-archive
|
||||
|
||||
@@ -32,6 +32,9 @@ packages:
|
||||
github.com/argoproj/argo-cd/v3/controller/cache:
|
||||
interfaces:
|
||||
LiveStateCache: {}
|
||||
github.com/argoproj/argo-cd/v3/controller/hydrator:
|
||||
interfaces:
|
||||
Dependencies: {}
|
||||
github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster:
|
||||
interfaces:
|
||||
ClusterServiceServer: {}
|
||||
@@ -66,6 +69,9 @@ packages:
|
||||
github.com/argoproj/argo-cd/v3/util/helm:
|
||||
interfaces:
|
||||
Client: {}
|
||||
github.com/argoproj/argo-cd/v3/util/oci:
|
||||
interfaces:
|
||||
Client: {}
|
||||
github.com/argoproj/argo-cd/v3/util/io:
|
||||
interfaces:
|
||||
TempPaths: {}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:80dd3c3b9c6cecb9f1667e9290b3bc61b78c2678c02cbdae5f0fea92cc6734ab
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:10bb10bb062de665d4dc3e0ea36715270ead632cfcb74d08ca2273712a0dfb42
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS builder
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS builder
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6 AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM docker.io/library/golang:1.24.1@sha256:c5adecdb7b3f8c5ca3c88648a861882849cc8b02fed68ece31e25de88ad13418
|
||||
FROM docker.io/library/golang:1.25.0@sha256:9e56f0d0f043a68bb8c47c819e47dc29f6e8f5129b8885bed9d43f058f7f3ed6
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
|
||||
3
Makefile
3
Makefile
@@ -113,7 +113,6 @@ define run-in-test-server
|
||||
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
|
||||
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
|
||||
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
|
||||
-v /tmp:/tmp${VOLUME_MOUNT} \
|
||||
-w ${DOCKER_WORKDIR} \
|
||||
-p ${ARGOCD_E2E_APISERVER_PORT}:8080 \
|
||||
-p 4000:4000 \
|
||||
@@ -138,7 +137,6 @@ define run-in-test-client
|
||||
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
|
||||
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
|
||||
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
|
||||
-v /tmp:/tmp${VOLUME_MOUNT} \
|
||||
-w ${DOCKER_WORKDIR} \
|
||||
$(PODMAN_ARGS) \
|
||||
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
|
||||
@@ -604,6 +602,7 @@ install-test-tools-local:
|
||||
.PHONY: install-codegen-tools-local
|
||||
install-codegen-tools-local:
|
||||
./hack/install.sh codegen-tools
|
||||
./hack/install.sh codegen-go-tools
|
||||
|
||||
# Installs all tools required for running codegen (Go packages)
|
||||
.PHONY: install-go-tools-local
|
||||
|
||||
@@ -3,9 +3,9 @@ header:
|
||||
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
|
||||
last-updated: '2023-10-27'
|
||||
last-reviewed: '2023-10-27'
|
||||
commit-hash: 226a670fe6b3c6769ff6d18e6839298a58e4577d
|
||||
commit-hash: 320f46f06beaf75f9c406e3a47e2e09d36e2047a
|
||||
project-url: https://github.com/argoproj/argo-cd
|
||||
project-release: v3.1.0
|
||||
project-release: v3.2.0
|
||||
changelog: https://github.com/argoproj/argo-cd/releases
|
||||
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
|
||||
project-lifecycle:
|
||||
|
||||
3
Tiltfile
3
Tiltfile
@@ -69,7 +69,7 @@ docker_build_with_restart(
|
||||
],
|
||||
platform=platform,
|
||||
live_update=[
|
||||
sync('.tilt-bin/argocd_linux_amd64', '/usr/local/bin/argocd'),
|
||||
sync('.tilt-bin/argocd_linux', '/usr/local/bin/argocd'),
|
||||
],
|
||||
only=[
|
||||
'.tilt-bin',
|
||||
@@ -260,6 +260,7 @@ local_resource(
|
||||
'make lint-local',
|
||||
deps = code_deps,
|
||||
allow_parallel=True,
|
||||
resource_deps=['vendor']
|
||||
)
|
||||
|
||||
local_resource(
|
||||
|
||||
8
USERS.md
8
USERS.md
@@ -5,8 +5,10 @@ PR with your organization name if you are using Argo CD.
|
||||
|
||||
Currently, the following organizations are **officially** using Argo CD:
|
||||
|
||||
1. [100ms](https://www.100ms.ai/)
|
||||
1. [127Labs](https://127labs.com/)
|
||||
1. [3Rein](https://www.3rein.com/)
|
||||
1. [42 School](https://42.fr/)
|
||||
1. [4data](https://4data.ch/)
|
||||
1. [7shifts](https://www.7shifts.com/)
|
||||
1. [Adevinta](https://www.adevinta.com/)
|
||||
@@ -40,6 +42,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Back Market](https://www.backmarket.com)
|
||||
1. [Bajaj Finserv Health Ltd.](https://www.bajajfinservhealth.in)
|
||||
1. [Baloise](https://www.baloise.com)
|
||||
1. [Batumbu](https://batumbu.id)
|
||||
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
|
||||
1. [Beat](https://thebeat.co/en/)
|
||||
1. [Beez Innovation Labs](https://www.beezlabs.com/)
|
||||
@@ -71,6 +74,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Chime](https://www.chime.com)
|
||||
1. [Chronicle Labs](https://chroniclelabs.org)
|
||||
1. [Cisco ET&I](https://eti.cisco.com/)
|
||||
1. [Close](https://www.close.com/)
|
||||
1. [Cloud Posse](https://www.cloudposse.com/)
|
||||
1. [Cloud Scale](https://cloudscaleinc.com/)
|
||||
1. [CloudScript](https://www.cloudscript.com.br/)
|
||||
@@ -160,6 +164,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Hiya](https://hiya.com)
|
||||
1. [Honestbank](https://honestbank.com)
|
||||
1. [Hostinger](https://www.hostinger.com)
|
||||
1. [Hotjar](https://www.hotjar.com)
|
||||
1. [IABAI](https://www.iab.ai)
|
||||
1. [IBM](https://www.ibm.com/)
|
||||
1. [Ibotta](https://home.ibotta.com)
|
||||
@@ -173,6 +178,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Info Support](https://www.infosupport.com/)
|
||||
1. [InsideBoard](https://www.insideboard.com)
|
||||
1. [Instruqt](https://www.instruqt.com)
|
||||
1. [Intel](https://www.intel.com)
|
||||
1. [Intuit](https://www.intuit.com/)
|
||||
1. [Jellysmack](https://www.jellysmack.com)
|
||||
1. [Joblift](https://joblift.com/)
|
||||
@@ -321,6 +327,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [SEKAI](https://www.sekai.io/)
|
||||
1. [Semgrep](https://semgrep.com)
|
||||
1. [Shield](https://shield.com)
|
||||
1. [Shipfox](https://www.shipfox.io)
|
||||
1. [SI Analytics](https://si-analytics.ai)
|
||||
1. [Sidewalk Entertainment](https://sidewalkplay.com/)
|
||||
1. [Skit](https://skit.ai/)
|
||||
@@ -333,6 +340,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Snapp](https://snapp.ir/)
|
||||
1. [Snyk](https://snyk.io/)
|
||||
1. [Softway Medical](https://www.softwaymedical.fr/)
|
||||
1. [Sophotech](https://sopho.tech)
|
||||
1. [South China Morning Post (SCMP)](https://www.scmp.com/)
|
||||
1. [Speee](https://speee.jp/)
|
||||
1. [Spendesk](https://spendesk.com/)
|
||||
|
||||
@@ -16,6 +16,7 @@ package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
@@ -67,6 +68,8 @@ const (
|
||||
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
|
||||
NotifiedAnnotationKey = "notified.notifications.argoproj.io"
|
||||
ReconcileRequeueOnValidationError = time.Minute * 3
|
||||
ReverseDeletionOrder = "Reverse"
|
||||
AllAtOnceDeletionOrder = "AllAtOnce"
|
||||
)
|
||||
|
||||
var defaultPreservedAnnotations = []string{
|
||||
@@ -74,6 +77,11 @@ var defaultPreservedAnnotations = []string{
|
||||
argov1alpha1.AnnotationKeyRefresh,
|
||||
}
|
||||
|
||||
type deleteInOrder struct {
|
||||
AppName string
|
||||
Step int
|
||||
}
|
||||
|
||||
// ApplicationSetReconciler reconciles a ApplicationSet object
|
||||
type ApplicationSetReconciler struct {
|
||||
client.Client
|
||||
@@ -139,6 +147,19 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
}
|
||||
logCtx.Debugf("ownerReferences referring %s is deleted from generated applications", appsetName)
|
||||
}
|
||||
if isProgressiveSyncDeletionOrderReversed(&applicationSetInfo) {
|
||||
logCtx.Debugf("DeletionOrder is set as Reverse on %s", appsetName)
|
||||
currentApplications, err := r.getCurrentApplications(ctx, applicationSetInfo)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
requeueTime, err := r.performReverseDeletion(ctx, logCtx, applicationSetInfo, currentApplications)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
} else if requeueTime > 0 {
|
||||
return ctrl.Result{RequeueAfter: requeueTime}, err
|
||||
}
|
||||
}
|
||||
controllerutil.RemoveFinalizer(&applicationSetInfo, argov1alpha1.ResourcesFinalizerName)
|
||||
if err := r.Update(ctx, &applicationSetInfo); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
@@ -154,7 +175,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
// Log a warning if there are unrecognized generators
|
||||
_ = utils.CheckInvalidGenerators(&applicationSetInfo)
|
||||
// desiredApplications is the main list of all expected Applications from all generators in this appset.
|
||||
desiredApplications, applicationSetReason, err := template.GenerateApplications(logCtx, applicationSetInfo, r.Generators, r.Renderer, r.Client)
|
||||
generatedApplications, applicationSetReason, err := template.GenerateApplications(logCtx, applicationSetInfo, r.Generators, r.Renderer, r.Client)
|
||||
if err != nil {
|
||||
logCtx.Errorf("unable to generate applications: %v", err)
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
@@ -172,7 +193,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
|
||||
parametersGenerated = true
|
||||
|
||||
validateErrors, err := r.validateGeneratedApplications(ctx, desiredApplications, applicationSetInfo)
|
||||
validateErrors, err := r.validateGeneratedApplications(ctx, generatedApplications, applicationSetInfo)
|
||||
if err != nil {
|
||||
// While some generators may return an error that requires user intervention,
|
||||
// other generators reference external resources that may change to cause
|
||||
@@ -225,7 +246,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
appMap[app.Name] = app
|
||||
}
|
||||
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, desiredApplications, appMap)
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, logCtx, applicationSetInfo, currentApplications, generatedApplications, appMap)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
|
||||
}
|
||||
@@ -233,17 +254,23 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
}
|
||||
|
||||
var validApps []argov1alpha1.Application
|
||||
for i := range desiredApplications {
|
||||
if validateErrors[i] == nil {
|
||||
validApps = append(validApps, desiredApplications[i])
|
||||
for i := range generatedApplications {
|
||||
if validateErrors[generatedApplications[i].QualifiedName()] == nil {
|
||||
validApps = append(validApps, generatedApplications[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(validateErrors) > 0 {
|
||||
errorApps := make([]string, 0, len(validateErrors))
|
||||
for key := range validateErrors {
|
||||
errorApps = append(errorApps, key)
|
||||
}
|
||||
sort.Strings(errorApps)
|
||||
|
||||
var message string
|
||||
for _, v := range validateErrors {
|
||||
message = v.Error()
|
||||
logCtx.Errorf("validation error found during application validation: %s", message)
|
||||
for _, appName := range errorApps {
|
||||
message = validateErrors[appName].Error()
|
||||
logCtx.WithField("application", appName).Errorf("validation error found during application validation: %s", message)
|
||||
}
|
||||
if len(validateErrors) > 1 {
|
||||
// Only the last message gets added to the appset status, to keep the size reasonable.
|
||||
@@ -298,12 +325,12 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
}
|
||||
|
||||
if utils.DefaultPolicy(applicationSetInfo.Spec.SyncPolicy, r.Policy, r.EnablePolicyOverride).AllowDelete() {
|
||||
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, desiredApplications)
|
||||
err = r.deleteInCluster(ctx, logCtx, applicationSetInfo, generatedApplications)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argov1alpha1.ApplicationSetReasonDeleteApplicationError,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
@@ -357,120 +384,169 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) performReverseDeletion(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, currentApps []argov1alpha1.Application) (time.Duration, error) {
|
||||
requeueTime := 10 * time.Second
|
||||
stepLength := len(appset.Spec.Strategy.RollingSync.Steps)
|
||||
|
||||
// map applications by name using current applications
|
||||
appMap := make(map[string]*argov1alpha1.Application)
|
||||
for _, app := range currentApps {
|
||||
appMap[app.Name] = &app
|
||||
}
|
||||
|
||||
// Get Rolling Sync Step Maps
|
||||
_, appStepMap := r.buildAppDependencyList(logCtx, appset, currentApps)
|
||||
// reverse the AppStepMap to perform deletion
|
||||
var reverseDeleteAppSteps []deleteInOrder
|
||||
for appName, appStep := range appStepMap {
|
||||
reverseDeleteAppSteps = append(reverseDeleteAppSteps, deleteInOrder{appName, stepLength - appStep - 1})
|
||||
}
|
||||
|
||||
sort.Slice(reverseDeleteAppSteps, func(i, j int) bool {
|
||||
return reverseDeleteAppSteps[i].Step < reverseDeleteAppSteps[j].Step
|
||||
})
|
||||
|
||||
for _, step := range reverseDeleteAppSteps {
|
||||
logCtx.Infof("step %v : app %v", step.Step, step.AppName)
|
||||
app := appMap[step.AppName]
|
||||
retrievedApp := argov1alpha1.Application{}
|
||||
if err := r.Get(ctx, types.NamespacedName{Name: app.Name, Namespace: app.Namespace}, &retrievedApp); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
logCtx.Infof("application %s successfully deleted", step.AppName)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Check if the application is already being deleted
|
||||
if retrievedApp.DeletionTimestamp != nil {
|
||||
logCtx.Infof("application %s has been marked for deletion, but object not removed yet", step.AppName)
|
||||
if time.Since(retrievedApp.DeletionTimestamp.Time) > 2*time.Minute {
|
||||
return 0, errors.New("application has not been deleted in over 2 minutes")
|
||||
}
|
||||
}
|
||||
// The application has not been deleted yet, trigger its deletion
|
||||
if err := r.Delete(ctx, &retrievedApp); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return requeueTime, nil
|
||||
}
|
||||
logCtx.Infof("completed reverse deletion for ApplicationSet %v", appset.Name)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func getParametersGeneratedCondition(parametersGenerated bool, message string) argov1alpha1.ApplicationSetCondition {
|
||||
var paramtersGeneratedCondition argov1alpha1.ApplicationSetCondition
|
||||
var parametersGeneratedCondition argov1alpha1.ApplicationSetCondition
|
||||
if parametersGenerated {
|
||||
paramtersGeneratedCondition = argov1alpha1.ApplicationSetCondition{
|
||||
parametersGeneratedCondition = argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: argov1alpha1.ApplicationSetReasonParametersGenerated,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}
|
||||
} else {
|
||||
paramtersGeneratedCondition = argov1alpha1.ApplicationSetCondition{
|
||||
parametersGeneratedCondition = argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: message,
|
||||
Reason: argov1alpha1.ApplicationSetReasonErrorOccurred,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
}
|
||||
return paramtersGeneratedCondition
|
||||
return parametersGeneratedCondition
|
||||
}
|
||||
|
||||
func getResourceUpToDateCondition(errorOccurred bool, message string, reason string) argov1alpha1.ApplicationSetCondition {
|
||||
var resourceUpToDateCondition argov1alpha1.ApplicationSetCondition
|
||||
if errorOccurred {
|
||||
resourceUpToDateCondition = argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: message,
|
||||
Reason: reason,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
} else {
|
||||
resourceUpToDateCondition = argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "ApplicationSet up to date",
|
||||
Reason: argov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}
|
||||
}
|
||||
return resourceUpToDateCondition
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, condition argov1alpha1.ApplicationSetCondition, paramtersGenerated bool) error {
|
||||
// check if error occurred during reconcile process
|
||||
errOccurred := condition.Type == argov1alpha1.ApplicationSetConditionErrorOccurred
|
||||
|
||||
var errOccurredCondition argov1alpha1.ApplicationSetCondition
|
||||
|
||||
if errOccurred {
|
||||
errOccurredCondition = condition
|
||||
} else {
|
||||
errOccurredCondition = argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "Successfully generated parameters for all Applications",
|
||||
Reason: argov1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}
|
||||
}
|
||||
|
||||
paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message)
|
||||
resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason)
|
||||
|
||||
func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, condition argov1alpha1.ApplicationSetCondition, parametersGenerated bool) error {
|
||||
// Initialize the default condition types that this method evaluates
|
||||
evaluatedTypes := map[argov1alpha1.ApplicationSetConditionType]bool{
|
||||
argov1alpha1.ApplicationSetConditionErrorOccurred: true,
|
||||
argov1alpha1.ApplicationSetConditionParametersGenerated: true,
|
||||
argov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
|
||||
argov1alpha1.ApplicationSetConditionErrorOccurred: false,
|
||||
argov1alpha1.ApplicationSetConditionResourcesUpToDate: false,
|
||||
argov1alpha1.ApplicationSetConditionRolloutProgressing: false,
|
||||
}
|
||||
newConditions := []argov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition}
|
||||
// Evaluate current condition
|
||||
evaluatedTypes[condition.Type] = true
|
||||
newConditions := []argov1alpha1.ApplicationSetCondition{condition}
|
||||
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
|
||||
if !isRollingSyncStrategy(applicationSet) {
|
||||
// Progressing sync is always evaluated so conditions are removed when it is not enabled
|
||||
evaluatedTypes[argov1alpha1.ApplicationSetConditionRolloutProgressing] = true
|
||||
}
|
||||
|
||||
if condition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing {
|
||||
newConditions = append(newConditions, condition)
|
||||
// Evaluate ParametersGenerated since it is always provided
|
||||
if condition.Type != argov1alpha1.ApplicationSetConditionParametersGenerated {
|
||||
newConditions = append(newConditions, getParametersGeneratedCondition(parametersGenerated, condition.Message))
|
||||
}
|
||||
|
||||
// Evaluate dependencies between conditions.
|
||||
switch condition.Type {
|
||||
case argov1alpha1.ApplicationSetConditionResourcesUpToDate:
|
||||
if condition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
|
||||
// If the resources are up to date, we know there was no errors
|
||||
evaluatedTypes[argov1alpha1.ApplicationSetConditionErrorOccurred] = true
|
||||
newConditions = append(newConditions, argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
Reason: condition.Reason,
|
||||
Message: condition.Message,
|
||||
})
|
||||
}
|
||||
case argov1alpha1.ApplicationSetConditionErrorOccurred:
|
||||
if condition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
|
||||
// If there is an error anywhere in the reconciliation, we cannot consider the resources up to date
|
||||
evaluatedTypes[argov1alpha1.ApplicationSetConditionResourcesUpToDate] = true
|
||||
newConditions = append(newConditions, argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
Reason: argov1alpha1.ApplicationSetReasonErrorOccurred,
|
||||
Message: condition.Message,
|
||||
})
|
||||
}
|
||||
case argov1alpha1.ApplicationSetConditionRolloutProgressing:
|
||||
if !isRollingSyncStrategy(applicationSet) {
|
||||
// if the condition is a rolling sync and it is disabled, ignore it
|
||||
evaluatedTypes[condition.Type] = false
|
||||
}
|
||||
}
|
||||
|
||||
needToUpdateConditions := false
|
||||
for _, condition := range newConditions {
|
||||
// do nothing if appset already has same condition
|
||||
for _, c := range applicationSet.Status.Conditions {
|
||||
if c.Type == condition.Type && (c.Reason != condition.Reason || c.Status != condition.Status || c.Message != condition.Message) {
|
||||
// Update the applicationSet conditions
|
||||
previousConditions := applicationSet.Status.Conditions
|
||||
applicationSet.Status.SetConditions(newConditions, evaluatedTypes)
|
||||
|
||||
// Try to not call get/update if nothing has changed
|
||||
needToUpdateConditions := len(applicationSet.Status.Conditions) != len(previousConditions)
|
||||
if !needToUpdateConditions {
|
||||
for i, c := range applicationSet.Status.Conditions {
|
||||
previous := previousConditions[i]
|
||||
if c.Type != previous.Type || c.Reason != previous.Reason || c.Status != previous.Status || c.Message != previous.Message {
|
||||
needToUpdateConditions = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if needToUpdateConditions || len(applicationSet.Status.Conditions) < len(newConditions) {
|
||||
// fetch updated Application Set object before updating it
|
||||
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
|
||||
updatedAppset := &argov1alpha1.ApplicationSet{}
|
||||
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
if !needToUpdateConditions {
|
||||
return nil
|
||||
}
|
||||
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
updatedAppset := &argov1alpha1.ApplicationSet{}
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}, updatedAppset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
updatedAppset.Status.SetConditions(
|
||||
newConditions, evaluatedTypes,
|
||||
)
|
||||
|
||||
// Update the newly fetched object with new set of conditions
|
||||
err := r.Client.Status().Update(ctx, updatedAppset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedAppset.DeepCopyInto(applicationSet)
|
||||
return nil
|
||||
})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("unable to set application set condition: %w", err)
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
}
|
||||
|
||||
updatedAppset.Status.SetConditions(newConditions, evaluatedTypes)
|
||||
|
||||
// Update the newly fetched object with new set of conditions
|
||||
err := r.Client.Status().Update(ctx, updatedAppset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedAppset.DeepCopyInto(applicationSet)
|
||||
return nil
|
||||
})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("unable to set application set condition: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -478,33 +554,33 @@ func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.
|
||||
|
||||
// validateGeneratedApplications uses the Argo CD validation functions to verify the correctness of the
|
||||
// generated applications.
|
||||
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argov1alpha1.ApplicationSet) (map[int]error, error) {
|
||||
errorsByIndex := map[int]error{}
|
||||
func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Context, desiredApplications []argov1alpha1.Application, applicationSetInfo argov1alpha1.ApplicationSet) (map[string]error, error) {
|
||||
errorsByApp := map[string]error{}
|
||||
namesSet := map[string]bool{}
|
||||
for i, app := range desiredApplications {
|
||||
for i := range desiredApplications {
|
||||
app := &desiredApplications[i]
|
||||
if namesSet[app.Name] {
|
||||
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
|
||||
errorsByApp[app.QualifiedName()] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
|
||||
continue
|
||||
}
|
||||
namesSet[app.Name] = true
|
||||
|
||||
appProject := &argov1alpha1.AppProject{}
|
||||
err := r.Get(ctx, types.NamespacedName{Name: app.Spec.Project, Namespace: r.ArgoCDNamespace}, appProject)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
|
||||
errorsByApp[app.QualifiedName()] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err = argoutil.GetDestinationCluster(ctx, app.Spec.Destination, r.ArgoDB); err != nil {
|
||||
errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
|
||||
errorsByApp[app.QualifiedName()] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return errorsByIndex, nil
|
||||
return errorsByApp, nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argov1alpha1.ApplicationSet) time.Duration {
|
||||
@@ -732,7 +808,7 @@ func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *
|
||||
return fmt.Errorf("error getting current applications: %w", err)
|
||||
}
|
||||
|
||||
m := make(map[string]bool) // Will holds the app names in appList for the deletion process
|
||||
m := make(map[string]bool) // will hold the app names in appList for the deletion process
|
||||
|
||||
for _, app := range desiredApplications {
|
||||
m[app.Name] = true
|
||||
@@ -800,7 +876,7 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
|
||||
}
|
||||
|
||||
if !matchingCluster {
|
||||
appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found.", app.Name)
|
||||
appLog.Warnf("A match for the destination cluster for %s, by server url, couldn't be found", app.Name)
|
||||
}
|
||||
|
||||
validDestination = matchingCluster
|
||||
@@ -1011,6 +1087,11 @@ func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.Application
|
||||
return isRollingSyncStrategy(appset) && len(appset.Spec.Strategy.RollingSync.Steps) > 0
|
||||
}
|
||||
|
||||
func isProgressiveSyncDeletionOrderReversed(appset *argov1alpha1.ApplicationSet) bool {
|
||||
// When progressive sync is enabled + deletionOrder is set to Reverse (case-insensitive)
|
||||
return progressiveSyncsRollingSyncStrategyEnabled(appset) && strings.EqualFold(appset.Spec.Strategy.DeletionOrder, ReverseDeletionOrder)
|
||||
}
|
||||
|
||||
func isApplicationHealthy(app argov1alpha1.Application) bool {
|
||||
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
|
||||
|
||||
@@ -1140,15 +1221,10 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
|
||||
// if we have no RollingUpdate steps, clear out the existing ApplicationStatus entries
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
|
||||
updateCountMap := []int{}
|
||||
totalCountMap := []int{}
|
||||
|
||||
length := len(applicationSet.Spec.Strategy.RollingSync.Steps)
|
||||
|
||||
for s := 0; s < length; s++ {
|
||||
updateCountMap = append(updateCountMap, 0)
|
||||
totalCountMap = append(totalCountMap, 0)
|
||||
}
|
||||
updateCountMap := make([]int, length)
|
||||
totalCountMap := make([]int, length)
|
||||
|
||||
// populate updateCountMap with counts of existing Pending and Progressing Applications
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
@@ -1207,44 +1283,56 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditions(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet) []argov1alpha1.ApplicationSetCondition {
|
||||
appSetProgressing := false
|
||||
if !isRollingSyncStrategy(applicationSet) {
|
||||
return applicationSet.Status.Conditions
|
||||
}
|
||||
|
||||
completedWaves := map[string]bool{}
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
if appStatus.Status != "Healthy" {
|
||||
appSetProgressing = true
|
||||
if v, ok := completedWaves[appStatus.Step]; !ok {
|
||||
completedWaves[appStatus.Step] = appStatus.Status == "Healthy"
|
||||
} else {
|
||||
completedWaves[appStatus.Step] = v && appStatus.Status == "Healthy"
|
||||
}
|
||||
}
|
||||
|
||||
isProgressing := false
|
||||
progressingStep := ""
|
||||
for i := range applicationSet.Spec.Strategy.RollingSync.Steps {
|
||||
step := strconv.Itoa(i + 1)
|
||||
isCompleted, ok := completedWaves[step]
|
||||
if !ok {
|
||||
// Step has no applications, so it is completed
|
||||
continue
|
||||
}
|
||||
if !isCompleted {
|
||||
isProgressing = true
|
||||
progressingStep = step
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
appSetConditionProgressing := false
|
||||
for _, appSetCondition := range applicationSet.Status.Conditions {
|
||||
if appSetCondition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing && appSetCondition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
|
||||
appSetConditionProgressing = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if appSetProgressing && !appSetConditionProgressing {
|
||||
if isProgressing {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
applicationSet,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "ApplicationSet Rollout Rollout started",
|
||||
Message: "ApplicationSet is performing rollout of step " + progressingStep,
|
||||
Reason: argov1alpha1.ApplicationSetReasonApplicationSetModified,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, true,
|
||||
)
|
||||
} else if !appSetProgressing && appSetConditionProgressing {
|
||||
} else {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
applicationSet,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "ApplicationSet Rollout Rollout complete",
|
||||
Message: "ApplicationSet Rollout has completed",
|
||||
Reason: argov1alpha1.ApplicationSetReasonApplicationSetRolloutComplete,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}, true,
|
||||
)
|
||||
}
|
||||
|
||||
return applicationSet.Status.Conditions
|
||||
}
|
||||
|
||||
@@ -1345,17 +1433,37 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
|
||||
needToUpdateStatus := false
|
||||
|
||||
if len(applicationStatuses) != len(applicationSet.Status.ApplicationStatus) {
|
||||
logCtx.WithFields(log.Fields{
|
||||
"current_count": len(applicationSet.Status.ApplicationStatus),
|
||||
"expected_count": len(applicationStatuses),
|
||||
}).Debug("application status count changed")
|
||||
needToUpdateStatus = true
|
||||
} else {
|
||||
for i := range applicationStatuses {
|
||||
appStatus := applicationStatuses[i]
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appStatus.Application)
|
||||
if idx == -1 {
|
||||
logCtx.WithFields(log.Fields{"application": appStatus.Application}).Debug("application not found in current status")
|
||||
needToUpdateStatus = true
|
||||
break
|
||||
}
|
||||
currentStatus := applicationSet.Status.ApplicationStatus[idx]
|
||||
if currentStatus.Message != appStatus.Message || currentStatus.Status != appStatus.Status || currentStatus.Step != appStatus.Step {
|
||||
statusChanged := currentStatus.Status != appStatus.Status
|
||||
stepChanged := currentStatus.Step != appStatus.Step
|
||||
messageChanged := currentStatus.Message != appStatus.Message
|
||||
|
||||
if statusChanged || stepChanged || messageChanged {
|
||||
if statusChanged {
|
||||
logCtx.WithFields(log.Fields{"application": appStatus.Application, "previous_status": currentStatus.Status, "new_status": appStatus.Status}).
|
||||
Debug("application status changed")
|
||||
}
|
||||
if stepChanged {
|
||||
logCtx.WithFields(log.Fields{"application": appStatus.Application, "previous_step": currentStatus.Step, "new_step": appStatus.Step}).
|
||||
Debug("application step changed")
|
||||
}
|
||||
if messageChanged {
|
||||
logCtx.WithFields(log.Fields{"application": appStatus.Application}).Debug("application message changed")
|
||||
}
|
||||
needToUpdateStatus = true
|
||||
break
|
||||
}
|
||||
@@ -1363,17 +1471,17 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
|
||||
}
|
||||
|
||||
if needToUpdateStatus {
|
||||
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
|
||||
// sort to make sure the array is always in the same order
|
||||
applicationSet.Status.ApplicationStatus = make([]argov1alpha1.ApplicationSetApplicationStatus, len(applicationStatuses))
|
||||
copy(applicationSet.Status.ApplicationStatus, applicationStatuses)
|
||||
sort.Slice(applicationSet.Status.ApplicationStatus, func(i, j int) bool {
|
||||
return applicationSet.Status.ApplicationStatus[i].Application < applicationSet.Status.ApplicationStatus[j].Application
|
||||
})
|
||||
|
||||
// rebuild ApplicationStatus from scratch, we don't need any previous status history
|
||||
applicationSet.Status.ApplicationStatus = []argov1alpha1.ApplicationSetApplicationStatus{}
|
||||
for i := range applicationStatuses {
|
||||
applicationSet.Status.SetApplicationStatus(applicationStatuses[i])
|
||||
}
|
||||
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
updatedAppset := &argov1alpha1.ApplicationSet{}
|
||||
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}, updatedAppset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
@@ -1437,7 +1545,7 @@ func syncApplication(application argov1alpha1.Application, prune bool) argov1alp
|
||||
Info: []*argov1alpha1.Info{
|
||||
{
|
||||
Name: "Reason",
|
||||
Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
|
||||
Value: "ApplicationSet RollingSync triggered a sync of this Application resource",
|
||||
},
|
||||
},
|
||||
Sync: &argov1alpha1.SyncOperation{},
|
||||
@@ -1614,14 +1722,15 @@ func shouldRequeueForApplicationSet(appSetOld, appSetNew *argov1alpha1.Applicati
|
||||
}
|
||||
}
|
||||
|
||||
// only compare the applicationset spec, annotations, labels and finalizers, specifically avoiding
|
||||
// only compare the applicationset spec, annotations, labels and finalizers, deletionTimestamp, specifically avoiding
|
||||
// the status field. status is owned by the applicationset controller,
|
||||
// and we do not need to requeue when it does bookkeeping
|
||||
// NB: the ApplicationDestination comes from the ApplicationSpec being embedded
|
||||
// in the ApplicationSetTemplate from the generators
|
||||
if !cmp.Equal(appSetOld.Spec, appSetNew.Spec, cmpopts.EquateEmpty(), cmpopts.EquateComparable(argov1alpha1.ApplicationDestination{})) ||
|
||||
!cmp.Equal(appSetOld.GetLabels(), appSetNew.GetLabels(), cmpopts.EquateEmpty()) ||
|
||||
!cmp.Equal(appSetOld.GetFinalizers(), appSetNew.GetFinalizers(), cmpopts.EquateEmpty()) {
|
||||
!cmp.Equal(appSetOld.GetFinalizers(), appSetNew.GetFinalizers(), cmpopts.EquateEmpty()) ||
|
||||
!cmp.Equal(appSetOld.DeletionTimestamp, appSetNew.DeletionTimestamp, cmpopts.EquateEmpty()) {
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -1954,14 +1954,15 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
for _, cc := range []struct {
|
||||
name string
|
||||
apps []v1alpha1.Application
|
||||
validationErrors map[int]error
|
||||
validationErrors map[string]error
|
||||
}{
|
||||
{
|
||||
name: "valid app should return true",
|
||||
apps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
@@ -1976,14 +1977,15 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
validationErrors: map[int]error{},
|
||||
validationErrors: map[string]error{},
|
||||
},
|
||||
{
|
||||
name: "can't have both name and server defined",
|
||||
apps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
@@ -1999,14 +2001,15 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
validationErrors: map[int]error{0: errors.New("application destination spec is invalid: application destination can't have both name and server defined: my-cluster my-server")},
|
||||
validationErrors: map[string]error{"app": errors.New("application destination spec is invalid: application destination can't have both name and server defined: my-cluster my-server")},
|
||||
},
|
||||
{
|
||||
name: "project mismatch should return error",
|
||||
apps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "DOES-NOT-EXIST",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
@@ -2021,14 +2024,15 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
validationErrors: map[int]error{0: errors.New("application references project DOES-NOT-EXIST which does not exist")},
|
||||
validationErrors: map[string]error{"app": errors.New("application references project DOES-NOT-EXIST which does not exist")},
|
||||
},
|
||||
{
|
||||
name: "valid app should return true",
|
||||
apps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
@@ -2043,14 +2047,15 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
validationErrors: map[int]error{},
|
||||
validationErrors: map[string]error{},
|
||||
},
|
||||
{
|
||||
name: "cluster should match",
|
||||
apps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
@@ -2065,7 +2070,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
validationErrors: map[int]error{0: errors.New("application destination spec is invalid: there are no clusters with this name: nonexistent-cluster")},
|
||||
validationErrors: map[string]error{"app": errors.New("application destination spec is invalid: there are no clusters with this name: nonexistent-cluster")},
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
@@ -2198,13 +2203,20 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
someTime := &metav1.Time{Time: time.Now().Add(-5 * time.Minute)}
|
||||
existingParameterGeneratedCondition := getParametersGeneratedCondition(true, "")
|
||||
existingParameterGeneratedCondition.LastTransitionTime = someTime
|
||||
|
||||
testCases := []struct {
|
||||
appset v1alpha1.ApplicationSet
|
||||
conditions []v1alpha1.ApplicationSetCondition
|
||||
testfunc func(t *testing.T, appset v1alpha1.ApplicationSet)
|
||||
for _, c := range []struct {
|
||||
name string
|
||||
appset v1alpha1.ApplicationSet
|
||||
condition v1alpha1.ApplicationSetCondition
|
||||
parametersGenerated bool
|
||||
testfunc func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition)
|
||||
}{
|
||||
{
|
||||
name: "has parameters generated condition when false",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
@@ -2221,20 +2233,28 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
conditions: []v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "All applications have been generated successfully",
|
||||
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "This is a message",
|
||||
Reason: "test",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
},
|
||||
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
|
||||
parametersGenerated: false,
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
assert.Len(t, appset.Status.Conditions, 3)
|
||||
require.Len(t, conditions, 2)
|
||||
|
||||
// Conditions are ordered by type, so the order is deterministic
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[0].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[0].Status)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[1].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[1].Status)
|
||||
assert.Equal(t, "test", conditions[1].Reason)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "parameters generated condition is used when specified",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
@@ -2251,37 +2271,24 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
conditions: []v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "All applications have been generated successfully",
|
||||
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "ApplicationSet Rollout Rollout started",
|
||||
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionParametersGenerated,
|
||||
Message: "This is a message",
|
||||
Reason: "test",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
},
|
||||
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
|
||||
parametersGenerated: true,
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
assert.Len(t, appset.Status.Conditions, 3)
|
||||
require.Len(t, conditions, 1)
|
||||
|
||||
isProgressingCondition := false
|
||||
|
||||
for _, condition := range appset.Status.Conditions {
|
||||
if condition.Type == v1alpha1.ApplicationSetConditionRolloutProgressing {
|
||||
isProgressingCondition = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert.False(t, isProgressingCondition, "no RolloutProgressing should be set for applicationsets that don't have rolling strategy")
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[0].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[0].Status)
|
||||
assert.Equal(t, "test", conditions[0].Reason)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "has parameter conditions when true",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
@@ -2296,83 +2303,369 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
|
||||
}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "This is a message",
|
||||
Reason: "test",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
},
|
||||
parametersGenerated: true,
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
require.Len(t, conditions, 2)
|
||||
|
||||
// Conditions are ordered by type, so the order is deterministic
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[0].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusTrue, conditions[0].Status)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[1].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[1].Status)
|
||||
assert.Equal(t, "test", conditions[1].Reason)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "resource up to date sets error condition to false",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "Completed",
|
||||
Reason: "test",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
require.Len(t, conditions, 3)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionErrorOccurred, conditions[0].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[0].Status)
|
||||
assert.Equal(t, "test", conditions[0].Reason)
|
||||
assert.Equal(t, "Completed", conditions[0].Message)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[1].Type)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[2].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusTrue, conditions[2].Status)
|
||||
assert.Equal(t, "test", conditions[2].Reason)
|
||||
assert.Equal(t, "Completed", conditions[2].Message)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "error condition sets resource up to date to false",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "Error",
|
||||
Reason: "test",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
require.Len(t, conditions, 3)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionErrorOccurred, conditions[0].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusTrue, conditions[0].Status)
|
||||
assert.Equal(t, "test", conditions[0].Reason)
|
||||
assert.Equal(t, "Error", conditions[0].Message)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[1].Type)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[2].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[2].Status)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetReasonErrorOccurred, conditions[2].Reason)
|
||||
assert.Equal(t, "Error", conditions[2].Message)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "updating an unchanged condition does not mutate existing conditions",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
}},
|
||||
},
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "test",
|
||||
Operator: "In",
|
||||
Values: []string{"test"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Conditions: []v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "existing",
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
existingParameterGeneratedCondition,
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "existing",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "existing",
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
conditions: []v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "All applications have been generated successfully",
|
||||
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "ApplicationSet Rollout Rollout started",
|
||||
Reason: v1alpha1.ApplicationSetReasonApplicationSetUpToDate,
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "existing",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
},
|
||||
testfunc: func(t *testing.T, appset v1alpha1.ApplicationSet) {
|
||||
parametersGenerated: true,
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
assert.Len(t, appset.Status.Conditions, 4)
|
||||
require.Len(t, conditions, 4)
|
||||
|
||||
isProgressingCondition := false
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionErrorOccurred, conditions[0].Type)
|
||||
assert.Equal(t, someTime, conditions[0].LastTransitionTime)
|
||||
|
||||
for _, condition := range appset.Status.Conditions {
|
||||
if condition.Type == v1alpha1.ApplicationSetConditionRolloutProgressing {
|
||||
isProgressingCondition = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionParametersGenerated, conditions[1].Type)
|
||||
assert.Equal(t, someTime, conditions[1].LastTransitionTime)
|
||||
|
||||
assert.True(t, isProgressingCondition, "RolloutProgressing should be set for rollout strategy appset")
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionResourcesUpToDate, conditions[2].Type)
|
||||
assert.Equal(t, someTime, conditions[2].LastTransitionTime)
|
||||
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionRolloutProgressing, conditions[3].Type)
|
||||
assert.Equal(t, someTime, conditions[3].LastTransitionTime)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
|
||||
for _, testCase := range testCases {
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&testCase.appset).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).WithStatusSubresource(&testCase.appset).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
{
|
||||
name: "progressing conditions is removed when AppSet is not configured",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
}},
|
||||
},
|
||||
// Strategy removed
|
||||
// Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
// Type: "RollingSync",
|
||||
// RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
|
||||
// },
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Conditions: []v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "existing",
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
existingParameterGeneratedCondition,
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "existing",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "existing",
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ArgoDB: argodb,
|
||||
KubeClientset: kubeclientset,
|
||||
Metrics: metrics,
|
||||
}
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "existing",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
},
|
||||
parametersGenerated: true,
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
require.Len(t, conditions, 3)
|
||||
for _, c := range conditions {
|
||||
assert.NotEqual(t, v1alpha1.ApplicationSetConditionRolloutProgressing, c.Type)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "progressing conditions is ignored when AppSet is not configured",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
}},
|
||||
},
|
||||
// Strategy removed
|
||||
// Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
// Type: "RollingSync",
|
||||
// RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
|
||||
// },
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Conditions: []v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "existing",
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
existingParameterGeneratedCondition,
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "existing",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "do not add me",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
parametersGenerated: true,
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
require.Len(t, conditions, 3)
|
||||
for _, c := range conditions {
|
||||
assert.NotEqual(t, v1alpha1.ApplicationSetConditionRolloutProgressing, c.Type)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "progressing conditions is updated correctly when configured",
|
||||
appset: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "my-cluster","url": "https://kubernetes.default.svc"}`),
|
||||
}},
|
||||
}},
|
||||
},
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Conditions: []v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: "existing",
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
existingParameterGeneratedCondition,
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
Message: "existing",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
LastTransitionTime: someTime,
|
||||
},
|
||||
{
|
||||
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "old value",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
condition: v1alpha1.ApplicationSetCondition{
|
||||
Type: v1alpha1.ApplicationSetConditionRolloutProgressing,
|
||||
Message: "new value",
|
||||
Status: v1alpha1.ApplicationSetConditionStatusFalse,
|
||||
},
|
||||
parametersGenerated: true,
|
||||
testfunc: func(t *testing.T, conditions []v1alpha1.ApplicationSetCondition) {
|
||||
t.Helper()
|
||||
require.Len(t, conditions, 4)
|
||||
|
||||
for _, condition := range testCase.conditions {
|
||||
err = r.setApplicationSetStatusCondition(t.Context(), &testCase.appset, condition, true)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionRolloutProgressing, conditions[3].Type)
|
||||
assert.Equal(t, v1alpha1.ApplicationSetConditionStatusFalse, conditions[3].Status)
|
||||
assert.Equal(t, "new value", conditions[3].Message)
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&c.appset).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).WithStatusSubresource(&c.appset).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
},
|
||||
ArgoDB: argodb,
|
||||
KubeClientset: kubeclientset,
|
||||
Metrics: metrics,
|
||||
}
|
||||
|
||||
err = r.setApplicationSetStatusCondition(t.Context(), &c.appset, c.condition, c.parametersGenerated)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
testCase.testfunc(t, testCase.appset)
|
||||
c.testfunc(t, c.appset.Status.Conditions)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6813,6 +7106,28 @@ func TestApplicationSetOwnsHandlerUpdate(t *testing.T) {
|
||||
enableProgressiveSyncs: false,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "deletionTimestamp present when progressive sync enabled",
|
||||
appSetOld: buildAppSet(map[string]string{}),
|
||||
appSetNew: &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
DeletionTimestamp: &metav1.Time{Time: time.Now()},
|
||||
},
|
||||
},
|
||||
enableProgressiveSyncs: true,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "deletionTimestamp present when progressive sync disabled",
|
||||
appSetOld: buildAppSet(map[string]string{}),
|
||||
appSetNew: &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
DeletionTimestamp: &metav1.Time{Time: time.Now()},
|
||||
},
|
||||
},
|
||||
enableProgressiveSyncs: false,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -6961,6 +7276,36 @@ func TestShouldRequeueForApplicationSet(t *testing.T) {
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "ApplicationSetWithDeletionTimestamp",
|
||||
args: args{
|
||||
appSetOld: &v1alpha1.ApplicationSet{
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
|
||||
{
|
||||
Application: "app1",
|
||||
Status: "Healthy",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
appSetNew: &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
DeletionTimestamp: &metav1.Time{Time: time.Now()},
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
ApplicationStatus: []v1alpha1.ApplicationSetApplicationStatus{
|
||||
{
|
||||
Application: "app1",
|
||||
Status: "Waiting",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
enableProgressiveSyncs: false,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -7134,7 +7479,7 @@ func TestSyncApplication(t *testing.T) {
|
||||
Info: []*v1alpha1.Info{
|
||||
{
|
||||
Name: "Reason",
|
||||
Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
|
||||
Value: "ApplicationSet RollingSync triggered a sync of this Application resource",
|
||||
},
|
||||
},
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
@@ -7176,7 +7521,7 @@ func TestSyncApplication(t *testing.T) {
|
||||
Info: []*v1alpha1.Info{
|
||||
{
|
||||
Name: "Reason",
|
||||
Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
|
||||
Value: "ApplicationSet RollingSync triggered a sync of this Application resource",
|
||||
},
|
||||
},
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
@@ -7198,3 +7543,110 @@ func TestSyncApplication(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsRollingSyncDeletionReversed(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
appset *v1alpha1.ApplicationSet
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "Deletion Order on strategy is set as Reverse",
|
||||
appset: &v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
"dev",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "environment",
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
"staging",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
DeletionOrder: ReverseDeletionOrder,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Deletion Order on strategy is set as AllAtOnce",
|
||||
appset: &v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{},
|
||||
},
|
||||
DeletionOrder: AllAtOnceDeletionOrder,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Deletion Order on strategy is set as Reverse but no steps in RollingSync",
|
||||
appset: &v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{},
|
||||
},
|
||||
DeletionOrder: ReverseDeletionOrder,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Deletion Order on strategy is set as Reverse, but AllAtOnce is explicitly set",
|
||||
appset: &v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "AllAtOnce",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{},
|
||||
},
|
||||
DeletionOrder: ReverseDeletionOrder,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Strategy is Nil",
|
||||
appset: &v1alpha1.ApplicationSet{
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := isProgressiveSyncDeletionOrderReversed(tt.appset)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,14 +79,10 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
return nil, fmt.Errorf("error getting cluster secrets: %w", err)
|
||||
}
|
||||
|
||||
res := []map[string]any{}
|
||||
paramHolder := ¶mHolder{isFlatMode: appSetGenerator.Clusters.FlatList}
|
||||
logCtx.Debugf("Using flat mode = %t for cluster generator", paramHolder.isFlatMode)
|
||||
|
||||
secretsFound := []corev1.Secret{}
|
||||
|
||||
isFlatMode := appSetGenerator.Clusters.FlatList
|
||||
logCtx.Debugf("Using flat mode = %t for cluster generator", isFlatMode)
|
||||
clustersParams := make([]map[string]any, 0)
|
||||
|
||||
for _, cluster := range clustersFromArgoCD {
|
||||
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
|
||||
// handled by the next step.
|
||||
@@ -105,72 +101,80 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
|
||||
}
|
||||
|
||||
if isFlatMode {
|
||||
clustersParams = append(clustersParams, params)
|
||||
} else {
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
paramHolder.append(params)
|
||||
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
}
|
||||
}
|
||||
|
||||
// For each matching cluster secret (non-local clusters only)
|
||||
for _, cluster := range secretsFound {
|
||||
params := map[string]any{}
|
||||
|
||||
params["name"] = string(cluster.Data["name"])
|
||||
params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"]))
|
||||
params["server"] = string(cluster.Data["server"])
|
||||
|
||||
project, ok := cluster.Data["project"]
|
||||
if ok {
|
||||
params["project"] = string(project)
|
||||
} else {
|
||||
params["project"] = ""
|
||||
}
|
||||
|
||||
if appSet.Spec.GoTemplate {
|
||||
meta := map[string]any{}
|
||||
|
||||
if len(cluster.Annotations) > 0 {
|
||||
meta["annotations"] = cluster.Annotations
|
||||
}
|
||||
if len(cluster.Labels) > 0 {
|
||||
meta["labels"] = cluster.Labels
|
||||
}
|
||||
|
||||
params["metadata"] = meta
|
||||
} else {
|
||||
for key, value := range cluster.Annotations {
|
||||
params["metadata.annotations."+key] = value
|
||||
}
|
||||
|
||||
for key, value := range cluster.Labels {
|
||||
params["metadata.labels."+key] = value
|
||||
}
|
||||
}
|
||||
params := g.getClusterParameters(cluster, appSet)
|
||||
|
||||
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error appending templated values for cluster: %w", err)
|
||||
}
|
||||
|
||||
if isFlatMode {
|
||||
clustersParams = append(clustersParams, params)
|
||||
} else {
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
paramHolder.append(params)
|
||||
logCtx.WithField("cluster", cluster.Name).Debug("matched cluster secret")
|
||||
}
|
||||
|
||||
if isFlatMode {
|
||||
res = append(res, map[string]any{
|
||||
"clusters": clustersParams,
|
||||
})
|
||||
return paramHolder.consolidate(), nil
|
||||
}
|
||||
|
||||
type paramHolder struct {
|
||||
isFlatMode bool
|
||||
params []map[string]any
|
||||
}
|
||||
|
||||
func (p *paramHolder) append(params map[string]any) {
|
||||
p.params = append(p.params, params)
|
||||
}
|
||||
|
||||
func (p *paramHolder) consolidate() []map[string]any {
|
||||
if p.isFlatMode {
|
||||
p.params = []map[string]any{
|
||||
{"clusters": p.params},
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
return p.params
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) getClusterParameters(cluster corev1.Secret, appSet *argoappsetv1alpha1.ApplicationSet) map[string]any {
|
||||
params := map[string]any{}
|
||||
|
||||
params["name"] = string(cluster.Data["name"])
|
||||
params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"]))
|
||||
params["server"] = string(cluster.Data["server"])
|
||||
|
||||
project, ok := cluster.Data["project"]
|
||||
if ok {
|
||||
params["project"] = string(project)
|
||||
} else {
|
||||
params["project"] = ""
|
||||
}
|
||||
|
||||
if appSet.Spec.GoTemplate {
|
||||
meta := map[string]any{}
|
||||
|
||||
if len(cluster.Annotations) > 0 {
|
||||
meta["annotations"] = cluster.Annotations
|
||||
}
|
||||
if len(cluster.Labels) > 0 {
|
||||
meta["labels"] = cluster.Labels
|
||||
}
|
||||
|
||||
params["metadata"] = meta
|
||||
} else {
|
||||
for key, value := range cluster.Annotations {
|
||||
params["metadata.annotations."+key] = value
|
||||
}
|
||||
|
||||
for key, value := range cluster.Labels {
|
||||
params["metadata.labels."+key] = value
|
||||
}
|
||||
}
|
||||
return params
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) getSecretsByClusterName(log *log.Entry, appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {
|
||||
|
||||
@@ -222,19 +222,18 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
|
||||
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, values map[string]string, useGoTemplate bool, goTemplateOptions []string, pathParamPrefix string) ([]map[string]any, error) {
|
||||
objectsFound := []map[string]any{}
|
||||
|
||||
// First, we attempt to parse as an array
|
||||
err := yaml.Unmarshal(fileContent, &objectsFound)
|
||||
if err != nil {
|
||||
// If unable to parse as an array, attempt to parse as a single object
|
||||
singleObj := make(map[string]any)
|
||||
err = yaml.Unmarshal(fileContent, &singleObj)
|
||||
// First, we attempt to parse as a single object.
|
||||
// This will also succeed for empty files.
|
||||
singleObj := map[string]any{}
|
||||
err := yaml.Unmarshal(fileContent, &singleObj)
|
||||
if err == nil {
|
||||
objectsFound = append(objectsFound, singleObj)
|
||||
} else {
|
||||
// If unable to parse as an object, try to parse as an array
|
||||
err = yaml.Unmarshal(fileContent, &objectsFound)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse file: %w", err)
|
||||
}
|
||||
objectsFound = append(objectsFound, singleObj)
|
||||
} else if len(objectsFound) == 0 {
|
||||
// If file is valid but empty, add a default empty item
|
||||
objectsFound = append(objectsFound, map[string]any{})
|
||||
}
|
||||
|
||||
res := []map[string]any{}
|
||||
|
||||
@@ -825,7 +825,7 @@ func TestGitGenerateParamsFromFiles(t *testing.T) {
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]any{},
|
||||
expectedError: errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
expectedError: errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []map[string]interface {}"),
|
||||
},
|
||||
{
|
||||
name: "test JSON array",
|
||||
@@ -982,6 +982,16 @@ cluster:
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "test empty YAML array",
|
||||
files: []v1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
|
||||
repoFileContents: map[string][]byte{
|
||||
"cluster-config/production/config.yaml": []byte(`[]`),
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]any{},
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
@@ -2060,7 +2070,7 @@ func TestGitGenerateParamsFromFilesGoTemplate(t *testing.T) {
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]any{},
|
||||
expectedError: errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
expectedError: errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []map[string]interface {}"),
|
||||
},
|
||||
{
|
||||
name: "test JSON array",
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/gosimple/slug"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services"
|
||||
pullrequest "github.com/argoproj/argo-cd/v3/applicationset/services/pull_request"
|
||||
@@ -18,8 +19,6 @@ import (
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*PullRequestGenerator)(nil)
|
||||
|
||||
const (
|
||||
DefaultPullRequestRequeueAfter = 30 * time.Minute
|
||||
)
|
||||
@@ -49,6 +48,10 @@ func (g *PullRequestGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alph
|
||||
return DefaultPullRequestRequeueAfter
|
||||
}
|
||||
|
||||
func (g *PullRequestGenerator) GetContinueOnRepoNotFoundError(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) bool {
|
||||
return appSetGenerator.PullRequest.ContinueOnRepoNotFoundError
|
||||
}
|
||||
|
||||
func (g *PullRequestGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.PullRequest.Template
|
||||
}
|
||||
@@ -69,10 +72,15 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
}
|
||||
|
||||
pulls, err := pullrequest.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters)
|
||||
params := make([]map[string]any, 0, len(pulls))
|
||||
if err != nil {
|
||||
if pullrequest.IsRepositoryNotFoundError(err) && g.GetContinueOnRepoNotFoundError(appSetGenerator) {
|
||||
log.WithError(err).WithField("generator", g).
|
||||
Warn("Skipping params generation for this repository since it was not found.")
|
||||
return params, nil
|
||||
}
|
||||
return nil, fmt.Errorf("error listing repos: %w", err)
|
||||
}
|
||||
params := make([]map[string]any, 0, len(pulls))
|
||||
|
||||
// In order to follow the DNS label standard as defined in RFC 1123,
|
||||
// we need to limit the 'branch' to 50 to give room to append/suffix-ing it
|
||||
|
||||
@@ -16,11 +16,12 @@ import (
|
||||
func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
cases := []struct {
|
||||
selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
|
||||
values map[string]string
|
||||
expected []map[string]any
|
||||
expectedErr error
|
||||
applicationSet argoprojiov1alpha1.ApplicationSet
|
||||
selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
|
||||
values map[string]string
|
||||
expected []map[string]any
|
||||
expectedErr error
|
||||
applicationSet argoprojiov1alpha1.ApplicationSet
|
||||
continueOnRepoNotFoundError bool
|
||||
}{
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
@@ -171,6 +172,30 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
expected: nil,
|
||||
expectedErr: errors.New("error listing repos: fake error"),
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
nil,
|
||||
pullrequest.NewRepositoryNotFoundError(errors.New("repository not found")),
|
||||
)
|
||||
},
|
||||
expected: []map[string]any{},
|
||||
expectedErr: nil,
|
||||
continueOnRepoNotFoundError: true,
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
nil,
|
||||
pullrequest.NewRepositoryNotFoundError(errors.New("repository not found")),
|
||||
)
|
||||
},
|
||||
expected: nil,
|
||||
expectedErr: errors.New("error listing repos: repository not found"),
|
||||
continueOnRepoNotFoundError: false,
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
@@ -260,7 +285,8 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
}
|
||||
generatorConfig := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
PullRequest: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
Values: c.values,
|
||||
Values: c.values,
|
||||
ContinueOnRepoNotFoundError: c.continueOnRepoNotFoundError,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,10 @@ import (
|
||||
"github.com/microsoft/azure-devops-go-api/azuredevops/v7/git"
|
||||
)
|
||||
|
||||
const AZURE_DEVOPS_DEFAULT_URL = "https://dev.azure.com"
|
||||
const (
|
||||
AZURE_DEVOPS_DEFAULT_URL = "https://dev.azure.com"
|
||||
AZURE_DEVOPS_PROJECT_NOT_FOUND_ERROR = "The following project does not exist"
|
||||
)
|
||||
|
||||
type AzureDevOpsClientFactory interface {
|
||||
// Returns an Azure Devops Client interface.
|
||||
@@ -70,13 +73,22 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
SearchCriteria: &git.GitPullRequestSearchCriteria{},
|
||||
}
|
||||
|
||||
pullRequests := []*PullRequest{}
|
||||
|
||||
azurePullRequests, err := client.GetPullRequestsByProject(ctx, args)
|
||||
if err != nil {
|
||||
// A standard Http 404 error is not returned for Azure DevOps,
|
||||
// so checking the error message for a specific pattern.
|
||||
// NOTE: Since the repos are filtered later, only existence of the project
|
||||
// is relevant for AzureDevOps
|
||||
if strings.Contains(err.Error(), AZURE_DEVOPS_PROJECT_NOT_FOUND_ERROR) {
|
||||
// return a custom error indicating that the repository is not found,
|
||||
// but also return the empty result since the decision to continue or not in this case is made by the caller
|
||||
return pullRequests, NewRepositoryNotFoundError(err)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get pull requests by project: %w", err)
|
||||
}
|
||||
|
||||
pullRequests := []*PullRequest{}
|
||||
|
||||
for _, pr := range *azurePullRequests {
|
||||
if pr.Repository == nil ||
|
||||
pr.Repository.Name == nil ||
|
||||
|
||||
@@ -2,6 +2,7 @@ package pull_request
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/microsoft/azure-devops-go-api/azuredevops/v7/core"
|
||||
@@ -235,3 +236,36 @@ func TestBuildURL(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAzureDevOpsListReturnsRepositoryNotFoundError(t *testing.T) {
|
||||
args := git.GetPullRequestsByProjectArgs{
|
||||
Project: createStringPtr("nonexistent"),
|
||||
SearchCriteria: &git.GitPullRequestSearchCriteria{},
|
||||
}
|
||||
|
||||
pullRequestMock := []git.GitPullRequest{}
|
||||
|
||||
gitClientMock := azureMock.Client{}
|
||||
clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
|
||||
clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil)
|
||||
|
||||
// Mock the GetPullRequestsByProject to return an error containing "404"
|
||||
gitClientMock.On("GetPullRequestsByProject", t.Context(), args).Return(&pullRequestMock,
|
||||
errors.New("The following project does not exist:"))
|
||||
|
||||
provider := AzureDevOpsService{
|
||||
clientFactory: clientFactoryMock,
|
||||
project: "nonexistent",
|
||||
repo: "nonexistent",
|
||||
labels: nil,
|
||||
}
|
||||
|
||||
prs, err := provider.List(t.Context())
|
||||
|
||||
// Should return empty pull requests list
|
||||
assert.Empty(t, prs)
|
||||
|
||||
// Should return RepositoryNotFoundError
|
||||
require.Error(t, err)
|
||||
assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/ktrysmt/go-bitbucket"
|
||||
)
|
||||
@@ -117,8 +118,17 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
|
||||
RepoSlug: b.repositorySlug,
|
||||
}
|
||||
|
||||
pullRequests := []*PullRequest{}
|
||||
|
||||
response, err := b.client.Repositories.PullRequests.Gets(opts)
|
||||
if err != nil {
|
||||
// A standard Http 404 error is not returned for Bitbucket Cloud,
|
||||
// so checking the error message for a specific pattern
|
||||
if strings.Contains(err.Error(), "404 Not Found") {
|
||||
// return a custom error indicating that the repository is not found,
|
||||
// but also return the empty result since the decision to continue or not in this case is made by the caller
|
||||
return pullRequests, NewRepositoryNotFoundError(err)
|
||||
}
|
||||
return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", b.owner, b.repositorySlug, err)
|
||||
}
|
||||
|
||||
@@ -142,7 +152,6 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
|
||||
return nil, fmt.Errorf("error unmarshalling json to type '[]BitbucketCloudPullRequest': %w", err)
|
||||
}
|
||||
|
||||
pullRequests := []*PullRequest{}
|
||||
for _, pull := range pulls {
|
||||
pullRequests = append(pullRequests, &PullRequest{
|
||||
Number: pull.ID,
|
||||
|
||||
@@ -492,3 +492,29 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
|
||||
TargetBranch: "branch-200",
|
||||
}, *pullRequests[0])
|
||||
}
|
||||
|
||||
func TestBitbucketCloudListReturnsRepositoryNotFoundError(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
path := "/repositories/nonexistent/nonexistent/pullrequests/"
|
||||
|
||||
mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
|
||||
// Return 404 status to simulate repository not found
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
|
||||
})
|
||||
|
||||
svc, err := NewBitbucketCloudServiceNoAuth(server.URL, "nonexistent", "nonexistent")
|
||||
require.NoError(t, err)
|
||||
|
||||
prs, err := svc.List(t.Context())
|
||||
|
||||
// Should return empty pull requests list
|
||||
assert.Empty(t, prs)
|
||||
|
||||
// Should return RepositoryNotFoundError
|
||||
require.Error(t, err)
|
||||
assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services"
|
||||
)
|
||||
|
||||
type BitbucketService struct {
|
||||
@@ -49,15 +49,10 @@ func NewBitbucketServiceNoAuth(ctx context.Context, url, projectKey, repositoryS
|
||||
}
|
||||
|
||||
func newBitbucketService(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey, repositorySlug string, scmRootCAPath string, insecure bool, caCerts []byte) (PullRequestService, error) {
|
||||
bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath)
|
||||
tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
|
||||
bitbucketConfig.HTTPClient = &http.Client{Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}}
|
||||
bitbucketClient := bitbucketv1.NewAPIClient(ctx, bitbucketConfig)
|
||||
bbClient := services.SetupBitbucketClient(ctx, bitbucketConfig, scmRootCAPath, insecure, caCerts)
|
||||
|
||||
return &BitbucketService{
|
||||
client: bitbucketClient,
|
||||
client: bbClient,
|
||||
projectKey: projectKey,
|
||||
repositorySlug: repositorySlug,
|
||||
}, nil
|
||||
@@ -72,6 +67,11 @@ func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) {
|
||||
for {
|
||||
response, err := b.client.DefaultApi.GetPullRequestsPage(b.projectKey, b.repositorySlug, paged)
|
||||
if err != nil {
|
||||
if response != nil && response.Response != nil && response.StatusCode == http.StatusNotFound {
|
||||
// return a custom error indicating that the repository is not found,
|
||||
// but also return the empty result since the decision to continue or not in this case is made by the caller
|
||||
return pullRequests, NewRepositoryNotFoundError(err)
|
||||
}
|
||||
return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", b.projectKey, b.repositorySlug, err)
|
||||
}
|
||||
pulls, err := bitbucketv1.GetPullRequestsResponse(response)
|
||||
|
||||
@@ -510,3 +510,29 @@ func TestListPullRequestBranchMatch(t *testing.T) {
|
||||
})
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestBitbucketServerListReturnsRepositoryNotFoundError(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
path := "/rest/api/1.0/projects/nonexistent/repos/nonexistent/pull-requests?limit=100"
|
||||
|
||||
mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
|
||||
// Return 404 status to simulate repository not found
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
|
||||
})
|
||||
|
||||
svc, err := NewBitbucketServiceNoAuth(t.Context(), server.URL, "nonexistent", "nonexistent", "", false, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
prs, err := svc.List(t.Context())
|
||||
|
||||
// Should return empty pull requests list
|
||||
assert.Empty(t, prs)
|
||||
|
||||
// Should return RepositoryNotFoundError
|
||||
require.Error(t, err)
|
||||
assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
|
||||
}
|
||||
|
||||
23
applicationset/services/pull_request/errors.go
Normal file
23
applicationset/services/pull_request/errors.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package pull_request
|
||||
|
||||
import "errors"
|
||||
|
||||
// RepositoryNotFoundError represents an error when a repository is not found by a pull request provider
|
||||
type RepositoryNotFoundError struct {
|
||||
causingError error
|
||||
}
|
||||
|
||||
func (e *RepositoryNotFoundError) Error() string {
|
||||
return e.causingError.Error()
|
||||
}
|
||||
|
||||
// NewRepositoryNotFoundError creates a new repository not found error
|
||||
func NewRepositoryNotFoundError(err error) error {
|
||||
return &RepositoryNotFoundError{causingError: err}
|
||||
}
|
||||
|
||||
// IsRepositoryNotFoundError checks if the given error is a repository not found error
|
||||
func IsRepositoryNotFoundError(err error) bool {
|
||||
var repoErr *RepositoryNotFoundError
|
||||
return errors.As(err, &repoErr)
|
||||
}
|
||||
48
applicationset/services/pull_request/errors_test.go
Normal file
48
applicationset/services/pull_request/errors_test.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package pull_request
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRepositoryNotFoundError(t *testing.T) {
|
||||
t.Run("NewRepositoryNotFoundError creates correct error type", func(t *testing.T) {
|
||||
originalErr := errors.New("repository does not exist")
|
||||
repoNotFoundErr := NewRepositoryNotFoundError(originalErr)
|
||||
|
||||
require.Error(t, repoNotFoundErr)
|
||||
assert.Equal(t, "repository does not exist", repoNotFoundErr.Error())
|
||||
})
|
||||
|
||||
t.Run("IsRepositoryNotFoundError identifies RepositoryNotFoundError", func(t *testing.T) {
|
||||
originalErr := errors.New("repository does not exist")
|
||||
repoNotFoundErr := NewRepositoryNotFoundError(originalErr)
|
||||
|
||||
assert.True(t, IsRepositoryNotFoundError(repoNotFoundErr))
|
||||
})
|
||||
|
||||
t.Run("IsRepositoryNotFoundError returns false for regular errors", func(t *testing.T) {
|
||||
regularErr := errors.New("some other error")
|
||||
|
||||
assert.False(t, IsRepositoryNotFoundError(regularErr))
|
||||
})
|
||||
|
||||
t.Run("IsRepositoryNotFoundError returns false for nil error", func(t *testing.T) {
|
||||
assert.False(t, IsRepositoryNotFoundError(nil))
|
||||
})
|
||||
|
||||
t.Run("IsRepositoryNotFoundError works with wrapped errors", func(t *testing.T) {
|
||||
originalErr := errors.New("repository does not exist")
|
||||
repoNotFoundErr := NewRepositoryNotFoundError(originalErr)
|
||||
wrappedErr := errors.New("wrapped: " + repoNotFoundErr.Error())
|
||||
|
||||
// Direct RepositoryNotFoundError should be identified
|
||||
assert.True(t, IsRepositoryNotFoundError(repoNotFoundErr))
|
||||
|
||||
// Wrapped string error should not be identified (this is expected behavior)
|
||||
assert.False(t, IsRepositoryNotFoundError(wrappedErr))
|
||||
})
|
||||
}
|
||||
@@ -52,11 +52,17 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
State: gitea.StateOpen,
|
||||
}
|
||||
g.client.SetContext(ctx)
|
||||
prs, _, err := g.client.ListRepoPullRequests(g.owner, g.repo, opts)
|
||||
list := []*PullRequest{}
|
||||
prs, resp, err := g.client.ListRepoPullRequests(g.owner, g.repo, opts)
|
||||
if err != nil {
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
// return a custom error indicating that the repository is not found,
|
||||
// but also returning the empty result since the decision to continue or not in this case is made by the caller
|
||||
return list, NewRepositoryNotFoundError(err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
list := []*PullRequest{}
|
||||
|
||||
for _, pr := range prs {
|
||||
if !giteaContainLabels(g.labels, pr.Labels) {
|
||||
continue
|
||||
|
||||
@@ -339,3 +339,35 @@ func TestGetGiteaPRLabelNames(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGiteaListReturnsRepositoryNotFoundError(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
// Handle version endpoint that Gitea client calls first
|
||||
mux.HandleFunc("/api/v1/version", func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, _ = w.Write([]byte(`{"version":"1.17.0+dev-452-g1f0541780"}`))
|
||||
})
|
||||
|
||||
path := "/api/v1/repos/nonexistent/nonexistent/pulls?limit=0&page=1&state=open"
|
||||
|
||||
mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
|
||||
// Return 404 status to simulate repository not found
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
|
||||
})
|
||||
|
||||
svc, err := NewGiteaService("", server.URL, "nonexistent", "nonexistent", []string{}, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
prs, err := svc.List(t.Context())
|
||||
|
||||
// Should return empty pull requests list
|
||||
assert.Empty(t, prs)
|
||||
|
||||
// Should return RepositoryNotFoundError
|
||||
require.Error(t, err)
|
||||
assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
|
||||
}
|
||||
|
||||
@@ -37,7 +37,11 @@ func NewGithubService(token, url, owner, repo string, labels []string, optionalH
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
|
||||
if token == "" {
|
||||
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
|
||||
} else {
|
||||
client, err = github.NewClient(httpClient).WithAuthToken(token).WithEnterpriseURLs(url, url)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -60,6 +64,11 @@ func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
for {
|
||||
pulls, resp, err := g.client.PullRequests.List(ctx, g.owner, g.repo, opts)
|
||||
if err != nil {
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
// return a custom error indicating that the repository is not found,
|
||||
// but also returning the empty result since the decision to continue or not in this case is made by the caller
|
||||
return pullRequests, NewRepositoryNotFoundError(err)
|
||||
}
|
||||
return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", g.owner, g.repo, err)
|
||||
}
|
||||
for _, pull := range pulls {
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
package pull_request
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-github/v69/github"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -86,3 +89,29 @@ func TestGetGitHubPRLabelNames(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitHubListReturnsRepositoryNotFoundError(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
path := "/repos/nonexistent/nonexistent/pulls"
|
||||
|
||||
mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
|
||||
// Return 404 status to simulate repository not found
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
|
||||
})
|
||||
|
||||
svc, err := NewGithubService("", server.URL, "nonexistent", "nonexistent", []string{}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
prs, err := svc.List(t.Context())
|
||||
|
||||
// Should return empty pull requests list
|
||||
assert.Empty(t, prs)
|
||||
|
||||
// Should return RepositoryNotFoundError
|
||||
require.Error(t, err)
|
||||
assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
|
||||
}
|
||||
|
||||
@@ -76,6 +76,11 @@ func (g *GitLabService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
for {
|
||||
mrs, resp, err := g.client.MergeRequests.ListProjectMergeRequests(g.project, opts, gitlab.WithContext(ctx))
|
||||
if err != nil {
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
// return a custom error indicating that the repository is not found,
|
||||
// but also returning the empty result since the decision to continue or not in this case is made by the caller
|
||||
return pullRequests, NewRepositoryNotFoundError(err)
|
||||
}
|
||||
return nil, fmt.Errorf("error listing merge requests for project '%s': %w", g.project, err)
|
||||
}
|
||||
for _, mr := range mrs {
|
||||
|
||||
@@ -191,3 +191,29 @@ func TestListWithStateTLS(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitLabListReturnsRepositoryNotFoundError(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
path := "/api/v4/projects/nonexistent/merge_requests"
|
||||
|
||||
mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
|
||||
// Return 404 status to simulate repository not found
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
|
||||
})
|
||||
|
||||
svc, err := NewGitLabService("", server.URL, "nonexistent", []string{}, "", "", false, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
prs, err := svc.List(t.Context())
|
||||
|
||||
// Should return empty pull requests list
|
||||
assert.Empty(t, prs)
|
||||
|
||||
// Should return RepositoryNotFoundError
|
||||
require.Error(t, err)
|
||||
assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
|
||||
}
|
||||
|
||||
@@ -30,4 +30,5 @@ type PullRequestService interface {
|
||||
type Filter struct {
|
||||
BranchMatch *regexp.Regexp
|
||||
TargetBranchMatch *regexp.Regexp
|
||||
TitleMatch *regexp.Regexp
|
||||
}
|
||||
|
||||
@@ -25,6 +25,12 @@ func compileFilters(filters []argoprojiov1alpha1.PullRequestGeneratorFilter) ([]
|
||||
return nil, fmt.Errorf("error compiling TargetBranchMatch regexp %q: %w", *filter.TargetBranchMatch, err)
|
||||
}
|
||||
}
|
||||
if filter.TitleMatch != nil {
|
||||
outFilter.TitleMatch, err = regexp.Compile(*filter.TitleMatch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error compiling TitleMatch regexp %q: %w", *filter.TitleMatch, err)
|
||||
}
|
||||
}
|
||||
outFilters = append(outFilters, outFilter)
|
||||
}
|
||||
return outFilters, nil
|
||||
@@ -37,6 +43,9 @@ func matchFilter(pullRequest *PullRequest, filter *Filter) bool {
|
||||
if filter.TargetBranchMatch != nil && !filter.TargetBranchMatch.MatchString(pullRequest.TargetBranch) {
|
||||
return false
|
||||
}
|
||||
if filter.TitleMatch != nil && !filter.TitleMatch.MatchString(pullRequest.Title) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -137,6 +137,110 @@ func TestFilterTargetBranchMatch(t *testing.T) {
|
||||
assert.Equal(t, "two", pullRequests[0].Branch)
|
||||
}
|
||||
|
||||
func TestFilterTitleMatch(t *testing.T) {
|
||||
provider, _ := NewFakeService(
|
||||
t.Context(),
|
||||
[]*PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Title: "PR one - filter",
|
||||
Branch: "one",
|
||||
TargetBranch: "master",
|
||||
HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name1",
|
||||
},
|
||||
{
|
||||
Number: 2,
|
||||
Title: "PR two - ignore",
|
||||
Branch: "two",
|
||||
TargetBranch: "branch1",
|
||||
HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name2",
|
||||
},
|
||||
{
|
||||
Number: 3,
|
||||
Title: "[filter] PR three",
|
||||
Branch: "three",
|
||||
TargetBranch: "branch2",
|
||||
HeadSHA: "389d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name3",
|
||||
},
|
||||
{
|
||||
Number: 4,
|
||||
Title: "[ignore] PR four",
|
||||
Branch: "four",
|
||||
TargetBranch: "branch3",
|
||||
HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name4",
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{
|
||||
{
|
||||
TitleMatch: strp("\\[filter]"),
|
||||
},
|
||||
}
|
||||
pullRequests, err := ListPullRequests(t.Context(), provider, filters)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 1)
|
||||
assert.Equal(t, "three", pullRequests[0].Branch)
|
||||
}
|
||||
|
||||
func TestMultiFilterOrWithTitle(t *testing.T) {
|
||||
provider, _ := NewFakeService(
|
||||
t.Context(),
|
||||
[]*PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Title: "PR one - filter",
|
||||
Branch: "one",
|
||||
TargetBranch: "master",
|
||||
HeadSHA: "189d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name1",
|
||||
},
|
||||
{
|
||||
Number: 2,
|
||||
Title: "PR two - ignore",
|
||||
Branch: "two",
|
||||
TargetBranch: "branch1",
|
||||
HeadSHA: "289d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name2",
|
||||
},
|
||||
{
|
||||
Number: 3,
|
||||
Title: "[filter] PR three",
|
||||
Branch: "three",
|
||||
TargetBranch: "branch2",
|
||||
HeadSHA: "389d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name3",
|
||||
},
|
||||
{
|
||||
Number: 4,
|
||||
Title: "[ignore] PR four",
|
||||
Branch: "four",
|
||||
TargetBranch: "branch3",
|
||||
HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name4",
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{
|
||||
{
|
||||
TitleMatch: strp("\\[filter]"),
|
||||
},
|
||||
{
|
||||
TitleMatch: strp("- filter"),
|
||||
},
|
||||
}
|
||||
pullRequests, err := ListPullRequests(t.Context(), provider, filters)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 2)
|
||||
assert.Equal(t, "one", pullRequests[0].Branch)
|
||||
assert.Equal(t, "three", pullRequests[1].Branch)
|
||||
}
|
||||
|
||||
func TestMultiFilterOr(t *testing.T) {
|
||||
provider, _ := NewFakeService(
|
||||
t.Context(),
|
||||
@@ -192,7 +296,7 @@ func TestMultiFilterOr(t *testing.T) {
|
||||
assert.Equal(t, "four", pullRequests[2].Branch)
|
||||
}
|
||||
|
||||
func TestMultiFilterOrWithTargetBranchFilter(t *testing.T) {
|
||||
func TestMultiFilterOrWithTargetBranchFilterOrWithTitleFilter(t *testing.T) {
|
||||
provider, _ := NewFakeService(
|
||||
t.Context(),
|
||||
[]*PullRequest{
|
||||
@@ -228,6 +332,14 @@ func TestMultiFilterOrWithTargetBranchFilter(t *testing.T) {
|
||||
HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name4",
|
||||
},
|
||||
{
|
||||
Number: 5,
|
||||
Title: "PR title is different than branch name",
|
||||
Branch: "five",
|
||||
TargetBranch: "branch3",
|
||||
HeadSHA: "489d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Author: "name5",
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
@@ -240,12 +352,21 @@ func TestMultiFilterOrWithTargetBranchFilter(t *testing.T) {
|
||||
BranchMatch: strp("r"),
|
||||
TargetBranchMatch: strp("3"),
|
||||
},
|
||||
{
|
||||
TitleMatch: strp("two"),
|
||||
},
|
||||
{
|
||||
BranchMatch: strp("five"),
|
||||
TitleMatch: strp("PR title is different than branch name"),
|
||||
},
|
||||
}
|
||||
pullRequests, err := ListPullRequests(t.Context(), provider, filters)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 2)
|
||||
assert.Len(t, pullRequests, 3)
|
||||
assert.Equal(t, "two", pullRequests[0].Branch)
|
||||
assert.Equal(t, "four", pullRequests[1].Branch)
|
||||
assert.Equal(t, "five", pullRequests[2].Branch)
|
||||
assert.Equal(t, "PR title is different than branch name", pullRequests[2].Title)
|
||||
}
|
||||
|
||||
func TestNoFilters(t *testing.T) {
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services"
|
||||
)
|
||||
|
||||
type BitbucketServerProvider struct {
|
||||
@@ -49,15 +49,10 @@ func NewBitbucketServerProviderNoAuth(ctx context.Context, url, projectKey strin
|
||||
}
|
||||
|
||||
func newBitbucketServerProvider(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey string, allBranches bool, scmRootCAPath string, insecure bool, caCerts []byte) (*BitbucketServerProvider, error) {
|
||||
bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath)
|
||||
tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
|
||||
bitbucketConfig.HTTPClient = &http.Client{Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}}
|
||||
bitbucketClient := bitbucketv1.NewAPIClient(ctx, bitbucketConfig)
|
||||
bbClient := services.SetupBitbucketClient(ctx, bitbucketConfig, scmRootCAPath, insecure, caCerts)
|
||||
|
||||
return &BitbucketServerProvider{
|
||||
client: bitbucketClient,
|
||||
client: bbClient,
|
||||
projectKey: projectKey,
|
||||
allBranches: allBranches,
|
||||
}, nil
|
||||
|
||||
@@ -36,7 +36,11 @@ func NewGithubProvider(organization string, token string, url string, allBranche
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
|
||||
if token == "" {
|
||||
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
|
||||
} else {
|
||||
client, err = github.NewClient(httpClient).WithAuthToken(token).WithEnterpriseURLs(url, url)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
22
applicationset/services/util.go
Normal file
22
applicationset/services/util.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
)
|
||||
|
||||
// SetupBitbucketClient configures and creates a Bitbucket API client with TLS settings
|
||||
func SetupBitbucketClient(ctx context.Context, config *bitbucketv1.Configuration, scmRootCAPath string, insecure bool, caCerts []byte) *bitbucketv1.APIClient {
|
||||
config.BasePath = utils.NormalizeBitbucketBasePath(config.BasePath)
|
||||
tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
|
||||
|
||||
transport := http.DefaultTransport.(*http.Transport).Clone()
|
||||
transport.TLSClientConfig = tlsConfig
|
||||
config.HTTPClient = &http.Client{Transport: transport}
|
||||
|
||||
return bitbucketv1.NewAPIClient(ctx, config)
|
||||
}
|
||||
37
applicationset/services/util_test.go
Normal file
37
applicationset/services/util_test.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSetupBitbucketClient(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &bitbucketv1.Configuration{}
|
||||
|
||||
// Act
|
||||
client := SetupBitbucketClient(ctx, cfg, "", false, nil)
|
||||
|
||||
// Assert
|
||||
require.NotNil(t, client, "expected client to be created")
|
||||
require.NotNil(t, cfg.HTTPClient, "expected HTTPClient to be set")
|
||||
|
||||
// The transport should be a clone of DefaultTransport
|
||||
tr, ok := cfg.HTTPClient.Transport.(*http.Transport)
|
||||
require.True(t, ok, "expected HTTPClient.Transport to be *http.Transport")
|
||||
require.NotSame(t, http.DefaultTransport, tr, "transport should be a clone, not the global DefaultTransport")
|
||||
|
||||
// Ensure TLSClientConfig is set
|
||||
require.IsType(t, &tls.Config{}, tr.TLSClientConfig)
|
||||
|
||||
// Defaults from http.DefaultTransport.Clone() should be preserved
|
||||
require.Greater(t, tr.IdleConnTimeout, time.Duration(0), "IdleConnTimeout should be non-zero")
|
||||
require.Positive(t, tr.MaxIdleConns, "MaxIdleConns should be non-zero")
|
||||
require.Greater(t, tr.TLSHandshakeTimeout, time.Duration(0), "TLSHandshakeTimeout should be non-zero")
|
||||
}
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
|
||||
var ErrDisallowedSecretAccess = fmt.Errorf("secret must have label %q=%q", common.LabelKeySecretType, common.LabelValueSecretTypeSCMCreds)
|
||||
|
||||
// getSecretRef gets the value of the key for the specified Secret resource.
|
||||
// GetSecretRef gets the value of the key for the specified Secret resource.
|
||||
func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov1alpha1.SecretRef, namespace string, tokenRefStrictMode bool) (string, error) {
|
||||
if ref == nil {
|
||||
return "", nil
|
||||
|
||||
@@ -74,15 +74,15 @@ func NewWebhookHandler(webhookParallelism int, argocdSettingsMgr *argosettings.S
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get argocd settings: %w", err)
|
||||
}
|
||||
githubHandler, err := github.New(github.Options.Secret(argocdSettings.WebhookGitHubSecret))
|
||||
githubHandler, err := github.New(github.Options.Secret(argocdSettings.GetWebhookGitHubSecret()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to init GitHub webhook: %w", err)
|
||||
}
|
||||
gitlabHandler, err := gitlab.New(gitlab.Options.Secret(argocdSettings.WebhookGitLabSecret))
|
||||
gitlabHandler, err := gitlab.New(gitlab.Options.Secret(argocdSettings.GetWebhookGitLabSecret()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to init GitLab webhook: %w", err)
|
||||
}
|
||||
azuredevopsHandler, err := azuredevops.New(azuredevops.Options.BasicAuth(argocdSettings.WebhookAzureDevOpsUsername, argocdSettings.WebhookAzureDevOpsPassword))
|
||||
azuredevopsHandler, err := azuredevops.New(azuredevops.Options.BasicAuth(argocdSettings.GetWebhookAzureDevOpsUsername(), argocdSettings.GetWebhookAzureDevOpsPassword()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to init Azure DevOps webhook: %w", err)
|
||||
}
|
||||
@@ -339,7 +339,7 @@ func genRevisionHasChanged(gen *v1alpha1.GitGenerator, revision string, touchedH
|
||||
|
||||
func gitGeneratorUsesURL(gen *v1alpha1.GitGenerator, webURL string, repoRegexp *regexp.Regexp) bool {
|
||||
if !repoRegexp.MatchString(gen.RepoURL) {
|
||||
log.Debugf("%s does not match %s", gen.RepoURL, repoRegexp.String())
|
||||
log.Warnf("%s does not match %s", gen.RepoURL, repoRegexp.String())
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
# p, <role/user/group>, <resource>, <action>, <object>, <allow/deny>
|
||||
|
||||
p, role:readonly, applications, get, */*, allow
|
||||
p, role:readonly, applicationsets, get, */*, allow
|
||||
p, role:readonly, certificates, get, *, allow
|
||||
p, role:readonly, clusters, get, *, allow
|
||||
p, role:readonly, repositories, get, *, allow
|
||||
|
||||
|
194
assets/swagger.json
generated
194
assets/swagger.json
generated
@@ -374,6 +374,56 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{appName}/server-side-diff": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "ServerSideDiff performs server-side diff calculation using dry-run apply",
|
||||
"operationId": "ApplicationService_ServerSideDiff",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "appName",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"name": "targetManifests",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationApplicationServerSideDiffResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{application.metadata.name}": {
|
||||
"put": {
|
||||
"tags": [
|
||||
@@ -1473,10 +1523,11 @@
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"description": "Deprecated: use RunResourceActionV2 instead. This version does not support resource action parameters but is\nmaintained for backward compatibility. It will be removed in a future release.",
|
||||
"tags": [
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "RunResourceAction run resource action",
|
||||
"summary": "RunResourceAction runs a resource action",
|
||||
"operationId": "ApplicationService_RunResourceAction",
|
||||
"parameters": [
|
||||
{
|
||||
@@ -1490,7 +1541,81 @@
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationResourceActionRunRequest"
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "namespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "resourceName",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "version",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "group",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "kind",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationApplicationResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/resource/actions/v2": {
|
||||
"post": {
|
||||
"tags": [
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "RunResourceActionV2 runs a resource action with parameters",
|
||||
"operationId": "ApplicationService_RunResourceActionV2",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationResourceActionRunRequestV2"
|
||||
}
|
||||
}
|
||||
],
|
||||
@@ -4944,6 +5069,20 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationApplicationServerSideDiffResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ResourceDiff"
|
||||
}
|
||||
},
|
||||
"modified": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationApplicationSyncRequest": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSyncRequest is a request to apply the config state to live state",
|
||||
@@ -5127,7 +5266,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationResourceActionRunRequest": {
|
||||
"applicationResourceActionRunRequestV2": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
@@ -7185,6 +7324,10 @@
|
||||
"description": "ApplicationSetStrategy configures how generated Applications are updated in sequence.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"deletionOrder": {
|
||||
"type": "string",
|
||||
"title": "DeletionOrder allows specifying the order for deleting generated apps when progressive sync is enabled.\naccepts values \"AllAtOnce\" and \"Reverse\""
|
||||
},
|
||||
"rollingSync": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetRolloutStrategy"
|
||||
},
|
||||
@@ -8560,12 +8703,20 @@
|
||||
"title": "KustomizeOptions are options for kustomize to use when building manifests",
|
||||
"properties": {
|
||||
"binaryPath": {
|
||||
"description": "Deprecated: Use settings.Settings instead. See: settings.Settings.KustomizeVersions.\nIf this field is set, it will be used as the Kustomize binary path.\nOtherwise, Versions is used.",
|
||||
"type": "string",
|
||||
"title": "BinaryPath holds optional path to kustomize binary"
|
||||
},
|
||||
"buildOptions": {
|
||||
"type": "string",
|
||||
"title": "BuildOptions is a string of build parameters to use when calling `kustomize build`"
|
||||
},
|
||||
"versions": {
|
||||
"description": "Versions is a list of Kustomize versions and their corresponding binary paths and build options.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeVersion"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -8629,6 +8780,24 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeVersion": {
|
||||
"type": "object",
|
||||
"title": "KustomizeVersion holds information about additional Kustomize versions",
|
||||
"properties": {
|
||||
"buildOptions": {
|
||||
"type": "string",
|
||||
"title": "BuildOptions that are specific to a Kustomize version"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Name holds Kustomize version name"
|
||||
},
|
||||
"path": {
|
||||
"type": "string",
|
||||
"title": "Path holds the corresponding binary path"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ListGenerator": {
|
||||
"type": "object",
|
||||
"title": "ListGenerator include items info",
|
||||
@@ -8950,6 +9119,10 @@
|
||||
"bitbucketServer": {
|
||||
"$ref": "#/definitions/v1alpha1PullRequestGeneratorBitbucketServer"
|
||||
},
|
||||
"continueOnRepoNotFoundError": {
|
||||
"description": "ContinueOnRepoNotFoundError is a flag to continue the ApplicationSet Pull Request generator parameters generation even if the repository is not found.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"filters": {
|
||||
"description": "Filters for which pull requests should be considered.",
|
||||
"type": "array",
|
||||
@@ -9079,6 +9252,9 @@
|
||||
},
|
||||
"targetBranchMatch": {
|
||||
"type": "string"
|
||||
},
|
||||
"titleMatch": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -9482,21 +9658,9 @@
|
||||
"description": "ResourceActionParam represents a parameter for a resource action.\nIt includes a name, value, type, and an optional default value for the parameter.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"default": {
|
||||
"description": "Default is the default value of the parameter, if any.",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name is the name of the parameter.",
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"description": "Type is the type of the parameter (e.g., string, integer).",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Value is the value of the parameter.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -268,7 +268,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
|
||||
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
|
||||
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
|
||||
command.Flags().IntVar(&maxConcurrentReconciliations, "concurrent-reconciliations", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_RECONCILIATIONS", 10, 1, 100), "Max concurrent reconciliations limit for the controller")
|
||||
command.Flags().IntVar(&maxConcurrentReconciliations, "concurrent-reconciliations", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_RECONCILIATIONS", 10, 1, math.MaxInt), "Max concurrent reconciliations limit for the controller")
|
||||
command.Flags().StringVar(&scmRootCAPath, "scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")
|
||||
command.Flags().StringSliceVar(&globalPreservedAnnotations, "preserved-annotations", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS", []string{}, ","), "Sets global preserved field values for annotations")
|
||||
command.Flags().StringSliceVar(&globalPreservedLabels, "preserved-labels", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS", []string{}, ","), "Sets global preserved field values for labels")
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !darwin || (cgo && darwin)
|
||||
|
||||
package commands
|
||||
|
||||
import (
|
||||
25
cmd/argocd-k8s-auth/commands/azure_no_cgo.go
Normal file
25
cmd/argocd-k8s-auth/commands/azure_no_cgo.go
Normal file
@@ -0,0 +1,25 @@
|
||||
//go:build darwin && !cgo
|
||||
|
||||
// Package commands
|
||||
// This file is used when the GOOS is darwin and CGO is not enabled.
|
||||
// It provides a no-op implementation of newAzureCommand to allow goreleaser to build
|
||||
// a darwin binary on a linux machine.
|
||||
package commands
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/workloadidentity"
|
||||
)
|
||||
|
||||
func newAzureCommand() *cobra.Command {
|
||||
command := &cobra.Command{
|
||||
Use: "azure",
|
||||
Run: func(c *cobra.Command, _ []string) {
|
||||
log.Fatalf(workloadidentity.CGOError)
|
||||
},
|
||||
}
|
||||
return command
|
||||
}
|
||||
@@ -50,7 +50,6 @@ func NewCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
processorsCount int
|
||||
namespace string
|
||||
appLabelSelector string
|
||||
logLevel string
|
||||
logFormat string
|
||||
@@ -178,7 +177,6 @@ func NewCommand() *cobra.Command {
|
||||
clientConfig = addK8SFlagsToCmd(&command)
|
||||
command.Flags().IntVar(&processorsCount, "processors-count", 1, "Processors count.")
|
||||
command.Flags().StringVar(&appLabelSelector, "app-label-selector", "", "App label selector.")
|
||||
command.Flags().StringVar(&namespace, "namespace", "", "Namespace which controller handles. Current namespace if empty.")
|
||||
command.Flags().StringVar(&logLevel, "loglevel", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringVar(&logFormat, "logformat", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGFORMAT", "json"), "Set the logging format. One of: json|text")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", defaultMetricsPort, "Metrics port")
|
||||
|
||||
@@ -415,7 +415,6 @@ func reconcileApplications(
|
||||
},
|
||||
settingsMgr,
|
||||
stateCache,
|
||||
projInformer,
|
||||
server,
|
||||
cache,
|
||||
time.Second,
|
||||
@@ -464,7 +463,7 @@ func reconcileApplications(
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions = append(revisions, app.Spec.GetSource().TargetRevision)
|
||||
|
||||
res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false, false)
|
||||
res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error comparing app states: %w", err)
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@@ -608,7 +609,31 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
|
||||
clientConfig := clientcmd.NewDefaultClientConfig(*cfgAccess, &overrides)
|
||||
conf, err := clientConfig.ClientConfig()
|
||||
errors.CheckError(err)
|
||||
kubeClientset := fake.NewClientset()
|
||||
// Seed a minimal in-memory Argo CD environment so settings retrieval succeeds
|
||||
argoCDCM := &corev1.ConfigMap{
|
||||
TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDConfigMapName,
|
||||
Namespace: ArgoCDNamespace,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/part-of": "argocd",
|
||||
},
|
||||
},
|
||||
}
|
||||
argoCDSecret := &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDSecretName,
|
||||
Namespace: ArgoCDNamespace,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/part-of": "argocd",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"server.secretkey": []byte("test"),
|
||||
},
|
||||
}
|
||||
kubeClientset := fake.NewClientset(argoCDCM, argoCDSecret)
|
||||
|
||||
var awsAuthConf *v1alpha1.AWSAuthConfig
|
||||
var execProviderConf *v1alpha1.ExecProviderConfig
|
||||
|
||||
@@ -24,24 +24,24 @@ func TestRun_SignalHandling_GracefulShutdown(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
var runErr error
|
||||
doneCh := make(chan struct{})
|
||||
go func() {
|
||||
err = d.Run(t.Context(), &DashboardConfig{ClientOpts: &apiclient.ClientOptions{}})
|
||||
runErr = d.Run(t.Context(), &DashboardConfig{ClientOpts: &apiclient.ClientOptions{}})
|
||||
close(doneCh)
|
||||
}()
|
||||
|
||||
// Allow some time for the dashboard to register the signal handler
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
proc, err := os.FindProcess(os.Getpid())
|
||||
require.NoErrorf(t, err, "failed to find process: %v", err)
|
||||
err = proc.Signal(syscall.SIGINT)
|
||||
require.NoErrorf(t, err, "failed to send SIGINT: %v", err)
|
||||
proc, procErr := os.FindProcess(os.Getpid())
|
||||
require.NoErrorf(t, procErr, "failed to find process: %v", procErr)
|
||||
sigErr := proc.Signal(syscall.SIGINT)
|
||||
require.NoErrorf(t, sigErr, "failed to send SIGINT: %v", sigErr)
|
||||
|
||||
select {
|
||||
case <-doneCh:
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, runErr)
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("timeout: dashboard.Run did not exit after SIGINT")
|
||||
}
|
||||
|
||||
@@ -39,9 +39,13 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/headless"
|
||||
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/utils"
|
||||
cmdutil "github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
argocommon "github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/controller"
|
||||
argocdclient "github.com/argoproj/argo-cd/v3/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apiclient/application"
|
||||
|
||||
resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
|
||||
|
||||
clusterpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster"
|
||||
projectpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/project"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apiclient/settings"
|
||||
@@ -95,6 +99,7 @@ func NewApplicationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
command.AddCommand(NewApplicationTerminateOpCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationEditCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationPatchCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationGetResourceCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationPatchResourceCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationDeleteResourceCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationResourceActionsCommand(clientOpts))
|
||||
@@ -1281,6 +1286,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
revision string
|
||||
localRepoRoot string
|
||||
serverSideGenerate bool
|
||||
serverSideDiff bool
|
||||
localIncludes []string
|
||||
appNamespace string
|
||||
revisions []string
|
||||
@@ -1343,6 +1349,22 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
argoSettings, err := settingsIf.Get(ctx, &settings.SettingsQuery{})
|
||||
errors.CheckError(err)
|
||||
diffOption := &DifferenceOption{}
|
||||
|
||||
hasServerSideDiffAnnotation := resourceutil.HasAnnotationOption(app, argocommon.AnnotationCompareOptions, "ServerSideDiff=true")
|
||||
|
||||
// Use annotation if flag not explicitly set
|
||||
if !c.Flags().Changed("server-side-diff") {
|
||||
serverSideDiff = hasServerSideDiffAnnotation
|
||||
} else if serverSideDiff && !hasServerSideDiffAnnotation {
|
||||
// Flag explicitly set to true, but app annotation is not set
|
||||
fmt.Fprintf(os.Stderr, "Warning: Application does not have ServerSideDiff=true annotation.\n")
|
||||
}
|
||||
|
||||
// Server side diff with local requires server side generate to be set as there will be a mismatch with client-generated manifests.
|
||||
if serverSideDiff && local != "" && !serverSideGenerate {
|
||||
log.Fatal("--server-side-diff with --local requires --server-side-generate.")
|
||||
}
|
||||
|
||||
switch {
|
||||
case app.Spec.HasMultipleSources() && len(revisions) > 0 && len(sourcePositions) > 0:
|
||||
numOfSources := int64(len(app.Spec.GetSources()))
|
||||
@@ -1398,7 +1420,8 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
}
|
||||
proj := getProject(ctx, c, clientOpts, app.Spec.Project)
|
||||
foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
|
||||
|
||||
foundDiffs := findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, app.GetName(), app.GetNamespace())
|
||||
if foundDiffs && exitCode {
|
||||
os.Exit(diffExitCode)
|
||||
}
|
||||
@@ -1407,11 +1430,12 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
|
||||
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
|
||||
command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff. May also return non-zero exit code if there is an error.")
|
||||
command.Flags().IntVar(&diffExitCode, "diff-exit-code", 1, "Return specified exit code when there is a diff. Typical error code is 20.")
|
||||
command.Flags().IntVar(&diffExitCode, "diff-exit-code", 1, "Return specified exit code when there is a diff. Typical error code is 20 but use another exit code if you want to differentiate from the generic exit code (20) returned by all CLI commands.")
|
||||
command.Flags().StringVar(&local, "local", "", "Compare live app to a local manifests")
|
||||
command.Flags().StringVar(&revision, "revision", "", "Compare live app to a particular revision")
|
||||
command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
|
||||
command.Flags().BoolVar(&serverSideGenerate, "server-side-generate", false, "Used with --local, this will send your manifests to the server for diffing")
|
||||
command.Flags().BoolVar(&serverSideDiff, "server-side-diff", false, "Use server-side diff to calculate the diff. This will default to true if the ServerSideDiff annotation is set on the application.")
|
||||
command.Flags().StringArrayVar(&localIncludes, "local-include", []string{"*.yaml", "*.yml", "*.json"}, "Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path.")
|
||||
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Only render the difference in namespace")
|
||||
command.Flags().StringArrayVar(&revisions, "revisions", []string{}, "Show manifests at specific revisions for source position in source-positions")
|
||||
@@ -1421,6 +1445,101 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
return command
|
||||
}
|
||||
|
||||
// printResourceDiff prints the diff header and calls cli.PrintDiff for a resource
|
||||
func printResourceDiff(group, kind, namespace, name string, live, target *unstructured.Unstructured) {
|
||||
fmt.Printf("\n===== %s/%s %s/%s ======\n", group, kind, namespace, name)
|
||||
_ = cli.PrintDiff(name, live, target)
|
||||
}
|
||||
|
||||
// findAndPrintServerSideDiff performs a server-side diff by making requests to the api server and prints the response
|
||||
func findAndPrintServerSideDiff(ctx context.Context, app *argoappv1.Application, items []objKeyLiveTarget, resources *application.ManagedResourcesResponse, appIf application.ApplicationServiceClient, appName, appNs string) bool {
|
||||
// Process each item for server-side diff
|
||||
foundDiffs := false
|
||||
for _, item := range items {
|
||||
if item.target != nil && hook.IsHook(item.target) || item.live != nil && hook.IsHook(item.live) {
|
||||
continue
|
||||
}
|
||||
|
||||
// For server-side diff, we need to create aligned arrays for this specific resource
|
||||
var liveResource *argoappv1.ResourceDiff
|
||||
var targetManifest string
|
||||
|
||||
if item.live != nil {
|
||||
for _, res := range resources.Items {
|
||||
if res.Group == item.key.Group && res.Kind == item.key.Kind &&
|
||||
res.Namespace == item.key.Namespace && res.Name == item.key.Name {
|
||||
liveResource = res
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if liveResource == nil {
|
||||
// Create empty live resource for creation case
|
||||
liveResource = &argoappv1.ResourceDiff{
|
||||
Group: item.key.Group,
|
||||
Kind: item.key.Kind,
|
||||
Namespace: item.key.Namespace,
|
||||
Name: item.key.Name,
|
||||
LiveState: "",
|
||||
TargetState: "",
|
||||
Modified: true,
|
||||
}
|
||||
}
|
||||
|
||||
if item.target != nil {
|
||||
jsonBytes, err := json.Marshal(item.target)
|
||||
if err != nil {
|
||||
errors.CheckError(fmt.Errorf("error marshaling target object: %w", err))
|
||||
}
|
||||
targetManifest = string(jsonBytes)
|
||||
}
|
||||
|
||||
// Call server-side diff for this individual resource
|
||||
serverSideDiffQuery := &application.ApplicationServerSideDiffQuery{
|
||||
AppName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Project: &app.Spec.Project,
|
||||
LiveResources: []*argoappv1.ResourceDiff{liveResource},
|
||||
TargetManifests: []string{targetManifest},
|
||||
}
|
||||
|
||||
serverSideDiffRes, err := appIf.ServerSideDiff(ctx, serverSideDiffQuery)
|
||||
if err != nil {
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
// Extract diff for this resource
|
||||
for _, resultItem := range serverSideDiffRes.Items {
|
||||
if resultItem.Hook || (!resultItem.Modified && resultItem.TargetState != "" && resultItem.LiveState != "") {
|
||||
continue
|
||||
}
|
||||
|
||||
if resultItem.Modified || resultItem.TargetState == "" || resultItem.LiveState == "" {
|
||||
var live, target *unstructured.Unstructured
|
||||
|
||||
if resultItem.TargetState != "" && resultItem.TargetState != "null" {
|
||||
target = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(resultItem.TargetState), target)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
if resultItem.LiveState != "" && resultItem.LiveState != "null" {
|
||||
live = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(resultItem.LiveState), live)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
// Print resulting diff for this resource
|
||||
foundDiffs = true
|
||||
printResourceDiff(resultItem.Group, resultItem.Kind, resultItem.Namespace, resultItem.Name, live, target)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return foundDiffs
|
||||
}
|
||||
|
||||
// DifferenceOption struct to store diff options
|
||||
type DifferenceOption struct {
|
||||
local string
|
||||
@@ -1432,47 +1551,15 @@ type DifferenceOption struct {
|
||||
revisions []string
|
||||
}
|
||||
|
||||
// findandPrintDiff ... Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false
|
||||
func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) bool {
|
||||
// findAndPrintDiff ... Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false
|
||||
func findAndPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts, useServerSideDiff bool, appIf application.ApplicationServiceClient, appName, appNs string) bool {
|
||||
var foundDiffs bool
|
||||
liveObjs, err := cmdutil.LiveObjects(resources.Items)
|
||||
|
||||
items, err := prepareObjectsForDiff(ctx, app, proj, resources, argoSettings, diffOptions)
|
||||
errors.CheckError(err)
|
||||
items := make([]objKeyLiveTarget, 0)
|
||||
switch {
|
||||
case diffOptions.local != "":
|
||||
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
case diffOptions.revision != "" || len(diffOptions.revisions) > 0:
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.res.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
errors.CheckError(err)
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
case diffOptions.serversideRes != nil:
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.serversideRes.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
errors.CheckError(err)
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
default:
|
||||
for i := range resources.Items {
|
||||
res := resources.Items[i]
|
||||
live := &unstructured.Unstructured{}
|
||||
err := json.Unmarshal([]byte(res.NormalizedLiveState), &live)
|
||||
errors.CheckError(err)
|
||||
|
||||
target := &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(res.TargetState), &target)
|
||||
errors.CheckError(err)
|
||||
|
||||
items = append(items, objKeyLiveTarget{kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name), live, target})
|
||||
}
|
||||
if useServerSideDiff {
|
||||
return findAndPrintServerSideDiff(ctx, app, items, resources, appIf, appName, appNs)
|
||||
}
|
||||
|
||||
for _, item := range items {
|
||||
@@ -1499,7 +1586,6 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
errors.CheckError(err)
|
||||
|
||||
if diffRes.Modified || item.target == nil || item.live == nil {
|
||||
fmt.Printf("\n===== %s/%s %s/%s ======\n", item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name)
|
||||
var live *unstructured.Unstructured
|
||||
var target *unstructured.Unstructured
|
||||
if item.target != nil && item.live != nil {
|
||||
@@ -1511,10 +1597,8 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
live = item.live
|
||||
target = item.target
|
||||
}
|
||||
if !foundDiffs {
|
||||
foundDiffs = true
|
||||
}
|
||||
_ = cli.PrintDiff(item.key.Name, live, target)
|
||||
foundDiffs = true
|
||||
printResourceDiff(item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name, live, target)
|
||||
}
|
||||
}
|
||||
return foundDiffs
|
||||
@@ -2296,7 +2380,11 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
fmt.Printf("====== Previewing differences between live and desired state of application %s ======\n", appQualifiedName)
|
||||
|
||||
proj := getProject(ctx, c, clientOpts, app.Spec.Project)
|
||||
foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
|
||||
|
||||
// Check if application has ServerSideDiff annotation
|
||||
serverSideDiff := resourceutil.HasAnnotationOption(app, argocommon.AnnotationCompareOptions, "ServerSideDiff=true")
|
||||
|
||||
foundDiffs = findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, appName, appNs)
|
||||
if !foundDiffs {
|
||||
fmt.Printf("====== No Differences found ======\n")
|
||||
// if no differences found, then no need to sync
|
||||
@@ -3519,3 +3607,60 @@ func NewApplicationConfirmDeletionCommand(clientOpts *argocdclient.ClientOptions
|
||||
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Namespace of the target application where the source will be appended")
|
||||
return command
|
||||
}
|
||||
|
||||
// prepareObjectsForDiff prepares objects for diffing using the switch statement
|
||||
// to handle different diff options and building the objKeyLiveTarget items
|
||||
func prepareObjectsForDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption) ([]objKeyLiveTarget, error) {
|
||||
liveObjs, err := cmdutil.LiveObjects(resources.Items)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items := make([]objKeyLiveTarget, 0)
|
||||
|
||||
switch {
|
||||
case diffOptions.local != "":
|
||||
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
case diffOptions.revision != "" || len(diffOptions.revisions) > 0:
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.res.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
case diffOptions.serversideRes != nil:
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.serversideRes.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
default:
|
||||
for i := range resources.Items {
|
||||
res := resources.Items[i]
|
||||
live := &unstructured.Unstructured{}
|
||||
err := json.Unmarshal([]byte(res.NormalizedLiveState), &live)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
target := &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(res.TargetState), &target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
items = append(items, objKeyLiveTarget{kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name), live, target})
|
||||
}
|
||||
}
|
||||
|
||||
return items, nil
|
||||
}
|
||||
|
||||
@@ -8,23 +8,23 @@ import (
|
||||
"strconv"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/templates"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/headless"
|
||||
"github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
argocdclient "github.com/argoproj/argo-cd/v3/pkg/apiclient"
|
||||
applicationpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/application"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/argo"
|
||||
"github.com/argoproj/argo-cd/v3/util/errors"
|
||||
"github.com/argoproj/argo-cd/v3/util/grpc"
|
||||
utilio "github.com/argoproj/argo-cd/v3/util/io"
|
||||
"github.com/argoproj/argo-cd/v3/util/templates"
|
||||
)
|
||||
|
||||
type DisplayedAction struct {
|
||||
@@ -192,7 +192,26 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti
|
||||
obj := filteredObjects[i]
|
||||
gvk := obj.GroupVersionKind()
|
||||
objResourceName := obj.GetName()
|
||||
_, err := appIf.RunResourceAction(ctx, &applicationpkg.ResourceActionRunRequest{
|
||||
_, err := appIf.RunResourceActionV2(ctx, &applicationpkg.ResourceActionRunRequestV2{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Namespace: ptr.To(obj.GetNamespace()),
|
||||
ResourceName: ptr.To(objResourceName),
|
||||
Group: ptr.To(gvk.Group),
|
||||
Kind: ptr.To(gvk.Kind),
|
||||
Version: ptr.To(gvk.GroupVersion().Version),
|
||||
Action: ptr.To(actionName),
|
||||
// TODO: add support for parameters
|
||||
})
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
if grpc.UnwrapGRPCStatus(err).Code() != codes.Unimplemented {
|
||||
errors.CheckError(err)
|
||||
}
|
||||
fmt.Println("RunResourceActionV2 is not supported by the server, falling back to RunResourceAction.")
|
||||
//nolint:staticcheck // RunResourceAction is deprecated, but we still need to support it for backward compatibility.
|
||||
_, err = appIf.RunResourceAction(ctx, &applicationpkg.ResourceActionRunRequest{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Namespace: ptr.To(obj.GetNamespace()),
|
||||
|
||||
@@ -2,11 +2,14 @@ package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -117,3 +120,561 @@ func TestPrintResourcesTree(t *testing.T) {
|
||||
|
||||
assert.Equal(t, expectation, output)
|
||||
}
|
||||
|
||||
// TestFilterFieldsFromObject verifies that filterFieldsFromObject keeps only
// the requested dotted field paths and that the object's metadata.name
// (set via SetName below) survives filtering.
func TestFilterFieldsFromObject(t *testing.T) {
	tests := []struct {
		name             string
		obj              unstructured.Unstructured // input manifest to filter
		filteredFields   []string                  // dotted paths passed to filterFieldsFromObject
		expectedFields   []string                  // paths that must still exist after filtering
		unexpectedFields []string                  // paths that must have been removed
	}{
		{
			name: "filter nested field",
			obj: unstructured.Unstructured{
				Object: map[string]any{
					"apiVersion": "vX",
					"kind":       "kind",
					"metadata": map[string]any{
						"name": "test",
					},
					"spec": map[string]any{
						"testfield": map[string]any{
							"nestedtest": "test",
						},
						"testfield2": "test",
					},
				},
			},
			filteredFields:   []string{"spec.testfield.nestedtest"},
			expectedFields:   []string{"spec.testfield.nestedtest"},
			unexpectedFields: []string{"spec.testfield2"},
		},
		{
			// NOTE(review): "spec.testfield3" is in the filter list but is not
			// asserted to survive — consider adding it to expectedFields.
			name: "filter multiple fields",
			obj: unstructured.Unstructured{
				Object: map[string]any{
					"apiVersion": "vX",
					"kind":       "kind",
					"metadata": map[string]any{
						"name": "test",
					},
					"spec": map[string]any{
						"testfield": map[string]any{
							"nestedtest": "test",
						},
						"testfield2": "test",
						"testfield3": "deleteme",
					},
				},
			},
			filteredFields:   []string{"spec.testfield.nestedtest", "spec.testfield3"},
			expectedFields:   []string{"spec.testfield.nestedtest"},
			unexpectedFields: []string{"spec.testfield2"},
		},
		{
			// NOTE(review): despite the name, this fixture is identical to
			// "filter nested field" above and contains no list — a real
			// nested-list fixture would strengthen coverage. TODO confirm intent.
			name: "filter nested list object",
			obj: unstructured.Unstructured{
				Object: map[string]any{
					"apiVersion": "vX",
					"kind":       "kind",
					"metadata": map[string]any{
						"name": "test",
					},
					"spec": map[string]any{
						"testfield": map[string]any{
							"nestedtest": "test",
						},
						"testfield2": "test",
					},
				},
			},
			filteredFields:   []string{"spec.testfield.nestedtest"},
			expectedFields:   []string{"spec.testfield.nestedtest"},
			unexpectedFields: []string{"spec.testfield2"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.obj.SetName("test-object")

			filtered := filterFieldsFromObject(&tt.obj, tt.filteredFields)

			for _, field := range tt.expectedFields {
				fieldPath := strings.Split(field, ".")
				_, exists, err := unstructured.NestedFieldCopy(filtered.Object, fieldPath...)
				require.NoError(t, err)
				assert.True(t, exists, "Expected field %s to exist", field)
			}

			for _, field := range tt.unexpectedFields {
				fieldPath := strings.Split(field, ".")
				_, exists, err := unstructured.NestedFieldCopy(filtered.Object, fieldPath...)
				require.NoError(t, err)
				assert.False(t, exists, "Expected field %s to not exist", field)
			}

			// The filtered object must retain the metadata.name set above.
			assert.Equal(t, tt.obj.GetName(), filtered.GetName())
		})
	}
}
|
||||
|
||||
// TestExtractNestedItem checks that extractNestedItem pulls the requested
// field path out of lists of objects (the list elements appear to be
// traversed implicitly between path segments — TODO confirm against the
// implementation) while preserving the surrounding structure, and that it
// returns nil when depth exceeds the field path length.
func TestExtractNestedItem(t *testing.T) {
	tests := []struct {
		name     string
		obj      map[string]any // input object containing lists
		fields   []string       // field path to extract
		depth    int            // starting index into fields
		expected map[string]any // expected filtered result; nil means "nothing extracted"
	}{
		{
			name: "extract simple nested item",
			obj: map[string]any{
				"listofitems": []any{
					map[string]any{
						"extract":     "123",
						"dontextract": "abc",
					},
					map[string]any{
						"extract":     "456",
						"dontextract": "def",
					},
					map[string]any{
						"extract":     "789",
						"dontextract": "ghi",
					},
				},
			},
			fields: []string{"listofitems", "extract"},
			depth:  0,
			expected: map[string]any{
				"listofitems": []any{
					map[string]any{
						"extract": "123",
					},
					map[string]any{
						"extract": "456",
					},
					map[string]any{
						"extract": "789",
					},
				},
			},
		},
		{
			name: "double nested list of objects",
			obj: map[string]any{
				"listofitems": []any{
					map[string]any{
						"doublenested": []any{
							map[string]any{
								"extract": "123",
							},
						},
						"dontextract": "abc",
					},
					map[string]any{
						"doublenested": []any{
							map[string]any{
								"extract": "456",
							},
						},
						"dontextract": "def",
					},
					map[string]any{
						"doublenested": []any{
							map[string]any{
								"extract": "789",
							},
						},
						"dontextract": "ghi",
					},
				},
			},
			fields: []string{"listofitems", "doublenested", "extract"},
			depth:  0,
			expected: map[string]any{
				"listofitems": []any{
					map[string]any{
						"doublenested": []any{
							map[string]any{
								"extract": "123",
							},
						},
					},
					map[string]any{
						"doublenested": []any{
							map[string]any{
								"extract": "456",
							},
						},
					},
					map[string]any{
						"doublenested": []any{
							map[string]any{
								"extract": "789",
							},
						},
					},
				},
			},
		},
		{
			// NOTE(review): "then" should read "than" in this case name; left
			// as-is because the name is a runtime string.
			name:     "depth is greater then list of field size",
			obj:      map[string]any{"test1": "1234567890"},
			fields:   []string{"test1"},
			depth:    4,
			expected: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			filteredObj := extractNestedItem(tt.obj, tt.fields, tt.depth)
			assert.Equal(t, tt.expected, filteredObj, "Did not get the correct filtered obj")
		})
	}
}
|
||||
|
||||
// TestExtractItemsFromList checks that extractItemsFromList maps each list
// element to an object containing only the requested (possibly nested)
// field — scalar or object-valued — and returns nil when the field is
// absent from the elements.
func TestExtractItemsFromList(t *testing.T) {
	tests := []struct {
		name     string
		list     []any    // input list of objects
		fields   []string // field path to keep within each element
		expected []any    // expected extracted list; nil means "field not found"
	}{
		{
			name: "test simple field",
			list: []any{
				map[string]any{"extract": "value1", "dontextract": "valueA"},
				map[string]any{"extract": "value2", "dontextract": "valueB"},
				map[string]any{"extract": "value3", "dontextract": "valueC"},
			},
			fields: []string{"extract"},
			expected: []any{
				map[string]any{"extract": "value1"},
				map[string]any{"extract": "value2"},
				map[string]any{"extract": "value3"},
			},
		},
		{
			name: "test simple field with some depth",
			list: []any{
				map[string]any{
					"test1": map[string]any{
						"test2": map[string]any{
							"extract":     "123",
							"dontextract": "abc",
						},
					},
				},
				map[string]any{
					"test1": map[string]any{
						"test2": map[string]any{
							"extract":     "456",
							"dontextract": "def",
						},
					},
				},
				map[string]any{
					"test1": map[string]any{
						"test2": map[string]any{
							"extract":     "789",
							"dontextract": "ghi",
						},
					},
				},
			},
			fields: []string{"test1", "test2", "extract"},
			expected: []any{
				map[string]any{
					"test1": map[string]any{
						"test2": map[string]any{
							"extract": "123",
						},
					},
				},
				map[string]any{
					"test1": map[string]any{
						"test2": map[string]any{
							"extract": "456",
						},
					},
				},
				map[string]any{
					"test1": map[string]any{
						"test2": map[string]any{
							"extract": "789",
						},
					},
				},
			},
		},
		{
			name: "test a missing field",
			list: []any{
				map[string]any{"test1": "123"},
				map[string]any{"test1": "456"},
				map[string]any{"test1": "789"},
			},
			fields:   []string{"test2"},
			expected: nil,
		},
		{
			// Object-valued fields are kept whole, not flattened.
			name: "test getting an object",
			list: []any{
				map[string]any{
					"extract": map[string]any{
						"keyA": "valueA",
						"keyB": "valueB",
						"keyC": "valueC",
					},
					"dontextract": map[string]any{
						"key1": "value1",
						"key2": "value2",
						"key3": "value3",
					},
				},
				map[string]any{
					"extract": map[string]any{
						"keyD": "valueD",
						"keyE": "valueE",
						"keyF": "valueF",
					},
					"dontextract": map[string]any{
						"key4": "value4",
						"key5": "value5",
						"key6": "value6",
					},
				},
			},
			fields: []string{"extract"},
			expected: []any{
				map[string]any{
					"extract": map[string]any{
						"keyA": "valueA",
						"keyB": "valueB",
						"keyC": "valueC",
					},
				},
				map[string]any{
					"extract": map[string]any{
						"keyD": "valueD",
						"keyE": "valueE",
						"keyF": "valueF",
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			extractedList := extractItemsFromList(tt.list, tt.fields)
			assert.Equal(t, tt.expected, extractedList, "Lists were not equal")
		})
	}
}
|
||||
|
||||
// TestReconstructObject checks that reconstructObject re-wraps a list of
// extracted values under the field path from the given depth downward,
// producing nested maps whose innermost key holds the list.
func TestReconstructObject(t *testing.T) {
	tests := []struct {
		name      string
		extracted []any          // previously extracted values to re-wrap
		fields    []string       // full field path
		depth     int            // index in fields at which wrapping starts
		expected  map[string]any // expected reconstructed object
	}{
		{
			name:      "simple single field at depth 0",
			extracted: []any{"value1", "value2"},
			fields:    []string{"items"},
			depth:     0,
			expected: map[string]any{
				"items": []any{"value1", "value2"},
			},
		},
		{
			name:      "object nested at depth 1",
			extracted: []any{map[string]any{"key": "value"}},
			fields:    []string{"test1", "test2"},
			depth:     1,
			expected: map[string]any{
				"test1": map[string]any{
					"test2": []any{map[string]any{"key": "value"}},
				},
			},
		},
		{
			// An empty extracted list still produces the wrapping structure.
			name:      "empty list of extracted items",
			extracted: []any{},
			fields:    []string{"test1"},
			depth:     0,
			expected: map[string]any{
				"test1": []any{},
			},
		},
		{
			// NOTE(review): "nesteed" is a typo for "nested" in this case
			// name; left as-is because the name is a runtime string.
			name: "complex object nesteed at depth 2",
			extracted: []any{map[string]any{
				"obj1": map[string]any{
					"key1": "value1",
					"key2": "value2",
				},
				"obj2": map[string]any{
					"keyA": "valueA",
					"keyB": "valueB",
				},
			}},
			fields: []string{"test1", "test2", "test3"},
			depth:  2,
			expected: map[string]any{
				"test1": map[string]any{
					"test2": map[string]any{
						"test3": []any{
							map[string]any{
								"obj1": map[string]any{
									"key1": "value1",
									"key2": "value2",
								},
								"obj2": map[string]any{
									"keyA": "valueA",
									"keyB": "valueB",
								},
							},
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			filteredObj := reconstructObject(tt.extracted, tt.fields, tt.depth)
			assert.Equal(t, tt.expected, filteredObj, "objects were not equal")
		})
	}
}
|
||||
|
||||
// TestPrintManifests exercises printManifests across the yaml, json, and
// wide output formats, with and without resource names shown.
//
// NOTE(review): the whitespace inside the expected yaml/json literals and
// the wide-format substrings below may have been collapsed by tooling
// (yaml nesting appears unindented) — verify against the real marshal and
// tabwriter output before relying on exact-match assertions.
// NOTE(review): assertion-message typos ("testefield", "tbale") could be
// cleaned up in a follow-up; left as-is here.
func TestPrintManifests(t *testing.T) {
	obj := unstructured.Unstructured{
		Object: map[string]any{
			"apiVersion": "vX",
			"kind":       "test",
			"metadata": map[string]any{
				"name": "unit-test",
			},
			"spec": map[string]any{
				"testfield": "testvalue",
			},
		},
	}

	expectedYAML := `apiVersion: vX
kind: test
metadata:
name: unit-test
spec:
testfield: testvalue
`

	// Single object in yaml format.
	output, _ := captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj}, false, true, "yaml")
		return nil
	})
	assert.Equal(t, expectedYAML+"\n", output, "Incorrect yaml output for printManifests")

	// Multiple objects are separated by a yaml document divider.
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj, obj}, false, true, "yaml")
		return nil
	})
	assert.Equal(t, expectedYAML+"\n---\n"+expectedYAML+"\n", output, "Incorrect yaml output with multiple objs.")

	expectedJSON := `{
"apiVersion": "vX",
"kind": "test",
"metadata": {
"name": "unit-test"
},
"spec": {
"testfield": "testvalue"
}
}`

	// Single object in json format.
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj}, false, true, "json")
		return nil
	})
	assert.Equal(t, expectedJSON+"\n", output, "Incorrect json output.")

	// Multiple json objects are also divider-separated.
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj, obj}, false, true, "json")
		return nil
	})
	assert.Equal(t, expectedJSON+"\n---\n"+expectedJSON+"\n", output, "Incorrect json output with multiple objs.")

	// Wide (table) format with resource names shown.
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj}, true, true, "wide")
		return nil
	})
	assert.Contains(t, output, "FIELD RESOURCE NAME VALUE", "Missing or incorrect header line for table print with showing names.")
	assert.Contains(t, output, "apiVersion unit-test vX", "Missing or incorrect row in table related to apiVersion with showing names.")
	assert.Contains(t, output, "kind unit-test test", "Missing or incorrect line in the table related to kind with showing names.")
	assert.Contains(t, output, "spec.testfield unit-test testvalue", "Missing or incorrect line in the table related to spec.testfield with showing names.")
	assert.NotContains(t, output, "metadata.name unit-test testvalue", "Missing or incorrect line in the table related to metadata.name with showing names.")

	// Wide (table) format without resource names.
	output, _ = captureOutput(func() error {
		printManifests(&[]unstructured.Unstructured{obj}, true, false, "wide")
		return nil
	})
	assert.Contains(t, output, "FIELD VALUE", "Missing or incorrect header line for table print with not showing names.")
	assert.Contains(t, output, "apiVersion vX", "Missing or incorrect row in table related to apiVersion with not showing names.")
	assert.Contains(t, output, "kind test", "Missing or incorrect row in the table related to kind with not showing names.")
	assert.Contains(t, output, "spec.testfield testvalue", "Missing or incorrect row in the table related to spec.testefield with not showing names.")
	assert.NotContains(t, output, "metadata.name testvalue", "Missing or incorrect row in the tbale related to metadata.name with not showing names.")
}
|
||||
|
||||
func TestPrintManifests_FilterNestedListObject_Wide(t *testing.T) {
|
||||
obj := unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "vX",
|
||||
"kind": "kind",
|
||||
"metadata": map[string]any{
|
||||
"name": "unit-test",
|
||||
},
|
||||
"status": map[string]any{
|
||||
"podIPs": []map[string]any{
|
||||
{
|
||||
"IP": "127.0.0.1",
|
||||
},
|
||||
{
|
||||
"IP": "127.0.0.2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
output, _ := captureOutput(func() error {
|
||||
v, err := json.Marshal(&obj)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var obj2 *unstructured.Unstructured
|
||||
err = json.Unmarshal([]byte(v), &obj2)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
printManifests(&[]unstructured.Unstructured{*obj2}, false, true, "wide")
|
||||
return nil
|
||||
})
|
||||
|
||||
// Verify table header
|
||||
assert.Contains(t, output, "FIELD RESOURCE NAME VALUE", "Missing a line in the table")
|
||||
assert.Contains(t, output, "apiVersion unit-test vX", "Test for apiVersion field failed for wide output")
|
||||
assert.Contains(t, output, "kind unit-test kind", "Test for kind field failed for wide output")
|
||||
assert.Contains(t, output, "status.podIPs[0].IP unit-test 127.0.0.1", "Test for podIP array index 0 field failed for wide output")
|
||||
assert.Contains(t, output, "status.podIPs[1].IP unit-test 127.0.0.2", "Test for podIP array index 1 field failed for wide output")
|
||||
}
|
||||
|
||||
@@ -1,16 +1,22 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/utils"
|
||||
"github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
@@ -22,15 +28,273 @@ import (
|
||||
utilio "github.com/argoproj/argo-cd/v3/util/io"
|
||||
)
|
||||
|
||||
// NewApplicationGetResourceCommand returns a new instance of the `app get-resource` command
|
||||
func NewApplicationGetResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
resourceName string
|
||||
kind string
|
||||
project string
|
||||
filteredFields []string
|
||||
showManagedFields bool
|
||||
output string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "get-resource APPNAME",
|
||||
Short: "Get details about the live Kubernetes manifests of a resource in an application. The filter-fields flag can be used to only display fields you want to see.",
|
||||
Example: `
|
||||
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in wide format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod
|
||||
|
||||
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in yaml format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod -o yaml
|
||||
|
||||
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in json format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod -o json
|
||||
|
||||
# Get details about all Pods in the application
|
||||
argocd app get-resource my-app --kind Pod
|
||||
|
||||
# Get a specific resource with managed fields, Pod my-app-pod, in 'my-app' by name in wide format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod --show-managed-fields
|
||||
|
||||
# Get the the details of a specific field in a resource in 'my-app' in the wide format
|
||||
argocd app get-resource my-app --kind Pod --filter-fields status.podIP
|
||||
|
||||
# Get the details of multiple specific fields in a specific resource in 'my-app' in the wide format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod --filter-fields status.podIP,status.hostIP`,
|
||||
}
|
||||
|
||||
command.Run = func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) != 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
appName, appNs := argo.ParseFromQualifiedName(args[0], "")
|
||||
|
||||
conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie()
|
||||
defer utilio.Close(conn)
|
||||
|
||||
tree, err := appIf.ResourceTree(ctx, &applicationpkg.ResourcesQuery{
|
||||
ApplicationName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
// Get manifests of resources
|
||||
// If resource name is "" find all resources of that kind
|
||||
var resources []unstructured.Unstructured
|
||||
var fetchedStr string
|
||||
for _, r := range tree.Nodes {
|
||||
if (resourceName != "" && r.Name != resourceName) || r.Kind != kind {
|
||||
continue
|
||||
}
|
||||
resource, err := appIf.GetResource(ctx, &applicationpkg.ApplicationResourceRequest{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Group: &r.Group,
|
||||
Kind: &r.Kind,
|
||||
Namespace: &r.Namespace,
|
||||
Project: &project,
|
||||
ResourceName: &r.Name,
|
||||
Version: &r.Version,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
manifest := resource.GetManifest()
|
||||
|
||||
var obj *unstructured.Unstructured
|
||||
err = json.Unmarshal([]byte(manifest), &obj)
|
||||
errors.CheckError(err)
|
||||
|
||||
if !showManagedFields {
|
||||
unstructured.RemoveNestedField(obj.Object, "metadata", "managedFields")
|
||||
}
|
||||
|
||||
if len(filteredFields) != 0 {
|
||||
obj = filterFieldsFromObject(obj, filteredFields)
|
||||
}
|
||||
|
||||
fetchedStr += obj.GetName() + ", "
|
||||
resources = append(resources, *obj)
|
||||
}
|
||||
printManifests(&resources, len(filteredFields) > 0, resourceName == "", output)
|
||||
|
||||
if fetchedStr != "" {
|
||||
fetchedStr = strings.TrimSuffix(fetchedStr, ", ")
|
||||
}
|
||||
log.Infof("Resources '%s' fetched", fetchedStr)
|
||||
}
|
||||
|
||||
command.Flags().StringVar(&resourceName, "resource-name", "", "Name of resource, if none is included will output details of all resources with specified kind")
|
||||
command.Flags().StringVar(&kind, "kind", "", "Kind of resource [REQUIRED]")
|
||||
err := command.MarkFlagRequired("kind")
|
||||
errors.CheckError(err)
|
||||
command.Flags().StringVar(&project, "project", "", "Project of resource")
|
||||
command.Flags().StringSliceVar(&filteredFields, "filter-fields", nil, "A comma separated list of fields to display, if not provided will output the entire manifest")
|
||||
command.Flags().BoolVar(&showManagedFields, "show-managed-fields", false, "Show managed fields in the output manifest")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Format of the output, wide, yaml, or json")
|
||||
return command
|
||||
}
|
||||
|
||||
// filterFieldsFromObject creates a new unstructured object containing only the specified fields from the source object.
|
||||
func filterFieldsFromObject(obj *unstructured.Unstructured, filteredFields []string) *unstructured.Unstructured {
|
||||
var filteredObj unstructured.Unstructured
|
||||
filteredObj.Object = make(map[string]any)
|
||||
|
||||
for _, f := range filteredFields {
|
||||
fields := strings.Split(f, ".")
|
||||
|
||||
value, exists, err := unstructured.NestedFieldCopy(obj.Object, fields...)
|
||||
if exists {
|
||||
errors.CheckError(err)
|
||||
err = unstructured.SetNestedField(filteredObj.Object, value, fields...)
|
||||
errors.CheckError(err)
|
||||
} else {
|
||||
// If doesn't exist assume its a nested inside a list of objects
|
||||
value := extractNestedItem(obj.Object, fields, 0)
|
||||
filteredObj.Object = value
|
||||
}
|
||||
}
|
||||
filteredObj.SetName(obj.GetName())
|
||||
return &filteredObj
|
||||
}
|
||||
|
||||
// extractNestedItem recursively extracts an item that may be nested inside a list of objects.
//
// It consumes the dot-path one segment deeper per call (depth indexes into
// fields). At each level it copies the value at the prefix fields[:depth+1];
// when that value is a list, the remaining path (fields[depth+1:]) is extracted
// from each list element and the results are re-wrapped under the original
// prefix via reconstructObject. When no element matches the remaining path
// directly, each object element is searched recursively instead. Returns nil
// when depth runs past the field path without ever encountering a list.
func extractNestedItem(obj map[string]any, fields []string, depth int) map[string]any {
	// Recursion bound: the whole path was consumed without hitting a list.
	if depth >= len(fields) {
		return nil
	}

	// Copy the value at the path prefix fields[0..depth]. The lookup error is
	// deliberately discarded: a failed lookup is handled via exists/ok below.
	value, exists, _ := unstructured.NestedFieldCopy(obj, fields[:depth+1]...)
	list, ok := value.([]any)
	if !exists || !ok {
		// No list at this prefix — try one path segment deeper.
		return extractNestedItem(obj, fields, depth+1)
	}

	// Pull the remaining path out of every element of the list.
	extractedItems := extractItemsFromList(list, fields[depth+1:])
	if len(extractedItems) == 0 {
		// Nothing matched directly; the target may sit deeper inside each
		// list element, so recurse into every object element from scratch.
		for _, e := range list {
			if o, ok := e.(map[string]any); ok {
				result := extractNestedItem(o, fields[depth+1:], 0)
				extractedItems = append(extractedItems, result)
			}
		}
	}

	// Re-nest the extracted items under the original path prefix.
	filteredObj := reconstructObject(extractedItems, fields, depth)
	return filteredObj
}
|
||||
|
||||
// extractItemsFromList processes a list of objects and extracts specific fields from each item.
|
||||
func extractItemsFromList(list []any, fields []string) []any {
|
||||
var extratedObjs []any
|
||||
for _, e := range list {
|
||||
extractedObj := make(map[string]any)
|
||||
if o, ok := e.(map[string]any); ok {
|
||||
value, exists, _ := unstructured.NestedFieldCopy(o, fields...)
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
err := unstructured.SetNestedField(extractedObj, value, fields...)
|
||||
errors.CheckError(err)
|
||||
extratedObjs = append(extratedObjs, extractedObj)
|
||||
}
|
||||
}
|
||||
return extratedObjs
|
||||
}
|
||||
|
||||
// reconstructObject rebuilds the original object structure by placing extracted items back into their proper nested location.
|
||||
func reconstructObject(extracted []any, fields []string, depth int) map[string]any {
|
||||
obj := make(map[string]any)
|
||||
err := unstructured.SetNestedField(obj, extracted, fields[:depth+1]...)
|
||||
errors.CheckError(err)
|
||||
return obj
|
||||
}
|
||||
|
||||
// printManifests outputs resource manifests in the specified format (wide, JSON, or YAML).
//
// For "json"/"yaml" each manifest is written to stdout directly, with a "---"
// separator between manifests when more than one resource is present. For any
// other output value a tabular ("wide") view is built through a tabwriter; the
// header gains a RESOURCE NAME column when showName is true. When
// filteredFields is true, metadata.name is stripped from the table rows (the
// name is still available via the name column) — note that this mutates the
// caller's objects in place.
func printManifests(objs *[]unstructured.Unstructured, filteredFields bool, showName bool, output string) {
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	// The header is only buffered here; it reaches stdout solely when the
	// writer is flushed below, i.e. on the wide/table output path.
	if showName {
		fmt.Fprintf(w, "FIELD\tRESOURCE NAME\tVALUE\n")
	} else {
		fmt.Fprintf(w, "FIELD\tVALUE\n")
	}

	for i, o := range *objs {
		if output == "json" || output == "yaml" {
			var formattedManifest []byte
			var err error
			if output == "json" {
				formattedManifest, err = json.MarshalIndent(o.Object, "", " ")
			} else {
				formattedManifest, err = yaml.Marshal(o.Object)
			}
			errors.CheckError(err)

			fmt.Println(string(formattedManifest))
			// Separate consecutive manifests; no trailing separator after the last.
			if len(*objs) > 1 && i != len(*objs)-1 {
				fmt.Println("---")
			}
		} else {
			// Capture the name before (possibly) removing it from the manifest.
			name := o.GetName()
			if filteredFields {
				// The name is redundant in filtered output: it is printed as
				// its own column. This mutates the shared object.
				unstructured.RemoveNestedField(o.Object, "metadata", "name")
			}

			printManifestAsTable(w, name, showName, o.Object, "")
		}
	}

	// Only the table path writes through the tabwriter, so only it needs a flush.
	if output != "json" && output != "yaml" {
		err := w.Flush()
		errors.CheckError(err)
	}
}
|
||||
|
||||
// printManifestAsTable recursively prints a manifest object as a tabular view with nested fields flattened.
|
||||
func printManifestAsTable(w *tabwriter.Writer, name string, showName bool, obj map[string]any, parentField string) {
|
||||
for key, value := range obj {
|
||||
field := parentField + key
|
||||
switch v := value.(type) {
|
||||
case map[string]any:
|
||||
printManifestAsTable(w, name, showName, v, field+".")
|
||||
case []any:
|
||||
for i, e := range v {
|
||||
index := "[" + strconv.Itoa(i) + "]"
|
||||
|
||||
if innerObj, ok := e.(map[string]any); ok {
|
||||
printManifestAsTable(w, name, showName, innerObj, field+index+".")
|
||||
} else {
|
||||
if showName {
|
||||
fmt.Fprintf(w, "%v\t%v\t%v\n", field+index, name, e)
|
||||
} else {
|
||||
fmt.Fprintf(w, "%v\t%v\n", field+index, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
if showName {
|
||||
fmt.Fprintf(w, "%v\t%v\t%v\n", field, name, v)
|
||||
} else {
|
||||
fmt.Fprintf(w, "%v\t%v\n", field, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var patch string
|
||||
var patchType string
|
||||
var resourceName string
|
||||
var namespace string
|
||||
var kind string
|
||||
var group string
|
||||
var all bool
|
||||
var project string
|
||||
var (
|
||||
patch string
|
||||
patchType string
|
||||
resourceName string
|
||||
namespace string
|
||||
kind string
|
||||
group string
|
||||
all bool
|
||||
project string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "patch-resource APPNAME",
|
||||
Short: "Patch resource in an application",
|
||||
@@ -90,14 +354,16 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
}
|
||||
|
||||
func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var resourceName string
|
||||
var namespace string
|
||||
var kind string
|
||||
var group string
|
||||
var force bool
|
||||
var orphan bool
|
||||
var all bool
|
||||
var project string
|
||||
var (
|
||||
resourceName string
|
||||
namespace string
|
||||
kind string
|
||||
group string
|
||||
force bool
|
||||
orphan bool
|
||||
all bool
|
||||
project string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "delete-resource APPNAME",
|
||||
Short: "Delete resource in an application",
|
||||
@@ -253,13 +519,16 @@ func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.Appli
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = w.Flush()
|
||||
err := w.Flush()
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var orphaned bool
|
||||
var output string
|
||||
var project string
|
||||
var (
|
||||
orphaned bool
|
||||
output string
|
||||
project string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "resources APPNAME",
|
||||
Short: "List resource of application",
|
||||
|
||||
@@ -2228,10 +2228,15 @@ func (c *fakeAppServiceClient) ListResourceActions(_ context.Context, _ *applica
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// nolint:staticcheck // ResourceActionRunRequest is deprecated, but we still need to implement it to satisfy the server interface.
|
||||
func (c *fakeAppServiceClient) RunResourceAction(_ context.Context, _ *applicationpkg.ResourceActionRunRequest, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *fakeAppServiceClient) RunResourceActionV2(_ context.Context, _ *applicationpkg.ResourceActionRunRequestV2, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *fakeAppServiceClient) DeleteResource(_ context.Context, _ *applicationpkg.ApplicationResourceDeleteRequest, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -2248,6 +2253,10 @@ func (c *fakeAppServiceClient) ListResourceLinks(_ context.Context, _ *applicati
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *fakeAppServiceClient) ServerSideDiff(_ context.Context, _ *applicationpkg.ApplicationServerSideDiffQuery, _ ...grpc.CallOption) (*applicationpkg.ApplicationServerSideDiffResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type fakeAcdClient struct {
|
||||
simulateTimeout uint
|
||||
}
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/cli"
|
||||
)
|
||||
|
||||
// NewBcryptCmd represents the bcrypt command
|
||||
@@ -15,22 +17,25 @@ func NewBcryptCmd() *cobra.Command {
|
||||
Use: "bcrypt",
|
||||
Short: "Generate bcrypt hash for any password",
|
||||
Example: `# Generate bcrypt hash for any password
|
||||
argocd account bcrypt --password YOUR_PASSWORD`,
|
||||
argocd account bcrypt --password YOUR_PASSWORD
|
||||
|
||||
# Prompt for password input
|
||||
argocd account bcrypt
|
||||
|
||||
# Read password from stdin
|
||||
echo -e "password" | argocd account bcrypt`,
|
||||
Run: func(cmd *cobra.Command, _ []string) {
|
||||
password = cli.PromptPassword(password)
|
||||
bytePassword := []byte(password)
|
||||
// Hashing the password
|
||||
hash, err := bcrypt.GenerateFromPassword(bytePassword, bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to genarate bcrypt hash: %v", err)
|
||||
log.Fatalf("Failed to generate bcrypt hash: %v", err)
|
||||
}
|
||||
fmt.Fprint(cmd.OutOrStdout(), string(hash))
|
||||
},
|
||||
}
|
||||
|
||||
bcryptCmd.Flags().StringVar(&password, "password", "", "Password for which bcrypt hash is generated")
|
||||
err := bcryptCmd.MarkFlagRequired("password")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return bcryptCmd
|
||||
}
|
||||
|
||||
@@ -2,9 +2,11 @@ package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
@@ -20,3 +22,27 @@ func TestGeneratePassword(t *testing.T) {
|
||||
err = bcrypt.CompareHashAndPassword(output.Bytes(), []byte("abc"))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGeneratePasswordWithStdin(t *testing.T) {
|
||||
oldStdin := os.Stdin
|
||||
defer func() {
|
||||
os.Stdin = oldStdin
|
||||
}()
|
||||
|
||||
input := bytes.NewBufferString("abc\n")
|
||||
r, w, _ := os.Pipe()
|
||||
_, _ = w.Write(input.Bytes())
|
||||
w.Close()
|
||||
os.Stdin = r
|
||||
|
||||
bcryptCmd := NewBcryptCmd()
|
||||
bcryptCmd.SetArgs([]string{})
|
||||
output := new(bytes.Buffer)
|
||||
bcryptCmd.SetOut(output)
|
||||
|
||||
err := bcryptCmd.Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = bcrypt.CompareHashAndPassword(output.Bytes(), []byte("abc"))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -27,6 +28,10 @@ argocd configure --prompts-enabled=false`,
|
||||
Run: func(_ *cobra.Command, _ []string) {
|
||||
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
|
||||
errors.CheckError(err)
|
||||
if localCfg == nil {
|
||||
fmt.Println("No local configuration found")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
localCfg.PromptsEnabled = promptsEnabled
|
||||
|
||||
|
||||
@@ -185,8 +185,7 @@ argocd login cd.argoproj.io --core`,
|
||||
command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate")
|
||||
command.Flags().BoolVar(&sso, "sso", false, "Perform SSO login")
|
||||
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application")
|
||||
command.Flags().
|
||||
BoolVar(&skipTestTLS, "skip-test-tls", false, "Skip testing whether the server is configured with TLS (this can help when the command hangs for no apparent reason)")
|
||||
command.Flags().BoolVar(&skipTestTLS, "skip-test-tls", false, "Skip testing whether the server is configured with TLS (this can help when the command hangs for no apparent reason)")
|
||||
command.Flags().BoolVar(&ssoLaunchBrowser, "sso-launch-browser", true, "Automatically launch the system default browser when performing SSO login")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -19,9 +19,12 @@ func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
Use: "logout CONTEXT",
|
||||
Short: "Log out from Argo CD",
|
||||
Long: "Log out from Argo CD",
|
||||
Example: `# To log out of argocd
|
||||
$ argocd logout
|
||||
Example: `# Logout from the active Argo CD context
|
||||
# This can be helpful for security reasons or when you want to switch between different Argo CD contexts or accounts.
|
||||
argocd logout CONTEXT
|
||||
|
||||
# Logout from a specific context named 'cd.argoproj.io'
|
||||
argocd logout cd.argoproj.io
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) == 0 {
|
||||
|
||||
@@ -270,6 +270,19 @@ func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
command := &cobra.Command{
|
||||
Use: "rm REPO ...",
|
||||
Short: "Remove configured repositories",
|
||||
Example: `
|
||||
# Remove a single repository
|
||||
argocd repo rm https://github.com/yourusername/your-repo.git
|
||||
|
||||
# Remove multiple repositories
|
||||
argocd repo rm https://github.com/yourusername/your-repo.git https://git.example.com/repo2.git
|
||||
|
||||
# Remove repositories for a specific project
|
||||
argocd repo rm https://github.com/yourusername/your-repo.git --project myproject
|
||||
|
||||
# Remove repository using SSH URL
|
||||
argocd repo rm git@github.com:yourusername/your-repo.git
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -330,6 +343,25 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command := &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List configured repositories",
|
||||
Example: `
|
||||
# List all repositories
|
||||
argocd repo list
|
||||
|
||||
# List repositories in wide format
|
||||
argocd repo list -o wide
|
||||
|
||||
# List repositories in YAML format
|
||||
argocd repo list -o yaml
|
||||
|
||||
# List repositories in JSON format
|
||||
argocd repo list -o json
|
||||
|
||||
# List urls of repositories
|
||||
argocd repo list -o url
|
||||
|
||||
# Force refresh of cached repository connection status
|
||||
argocd repo list --refresh hard
|
||||
`,
|
||||
Run: func(c *cobra.Command, _ []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -372,9 +404,26 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
refresh string
|
||||
project string
|
||||
)
|
||||
|
||||
// For better readability and easier formatting
|
||||
repoGetExamples := `
|
||||
# Get Git or Helm repository details in wide format (default, '-o wide')
|
||||
argocd repo get https://git.example.com/repos/repo
|
||||
|
||||
# Get repository details in YAML format
|
||||
argocd repo get https://git.example.com/repos/repo -o yaml
|
||||
|
||||
# Get repository details in JSON format
|
||||
argocd repo get https://git.example.com/repos/repo -o json
|
||||
|
||||
# Get repository URL
|
||||
argocd repo get https://git.example.com/repos/repo -o url
|
||||
`
|
||||
|
||||
command := &cobra.Command{
|
||||
Use: "get REPO",
|
||||
Short: "Get a configured repository by URL",
|
||||
Use: "get REPO",
|
||||
Short: "Get a configured repository by URL",
|
||||
Example: repoGetExamples,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
reposerver "github.com/argoproj/argo-cd/v3/cmd/argocd-repo-server/commands"
|
||||
apiserver "github.com/argoproj/argo-cd/v3/cmd/argocd-server/commands"
|
||||
cli "github.com/argoproj/argo-cd/v3/cmd/argocd/commands"
|
||||
"github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v3/util/log"
|
||||
)
|
||||
|
||||
@@ -74,7 +73,6 @@ func main() {
|
||||
command = cli.NewCommand()
|
||||
isArgocdCLI = true
|
||||
}
|
||||
util.SetAutoMaxProcs(isArgocdCLI)
|
||||
|
||||
if isArgocdCLI {
|
||||
// silence errors and usages since we'll be printing them manually.
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
@@ -102,19 +100,6 @@ type AppOptions struct {
|
||||
hydrateToBranch string
|
||||
}
|
||||
|
||||
// SetAutoMaxProcs sets the GOMAXPROCS value based on the binary name.
|
||||
// It suppresses logs for CLI binaries and logs the setting for services.
|
||||
func SetAutoMaxProcs(isCLI bool) {
|
||||
if isCLI {
|
||||
_, _ = maxprocs.Set() // Intentionally ignore errors for CLI binaries
|
||||
} else {
|
||||
_, err := maxprocs.Set(maxprocs.Logger(log.Infof))
|
||||
if err != nil {
|
||||
log.Errorf("Error setting GOMAXPROCS: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func AddAppFlags(command *cobra.Command, opts *AppOptions) {
|
||||
command.Flags().StringVar(&opts.repoURL, "repo", "", "Repository URL, ignored if a file is set")
|
||||
command.Flags().StringVar(&opts.appPath, "path", "", "Path in repository to the app directory, ignored if a file is set")
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
@@ -573,27 +572,3 @@ func TestFilterResources(t *testing.T) {
|
||||
assert.Nil(t, filteredResources)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetAutoMaxProcs(t *testing.T) {
|
||||
t.Run("CLI mode ignores errors", func(t *testing.T) {
|
||||
logBuffer := &bytes.Buffer{}
|
||||
oldLogger := log.Default()
|
||||
log.SetOutput(logBuffer)
|
||||
defer log.SetOutput(oldLogger.Writer())
|
||||
|
||||
SetAutoMaxProcs(true)
|
||||
|
||||
assert.Empty(t, logBuffer.String(), "Expected no log output when isCLI is true")
|
||||
})
|
||||
|
||||
t.Run("Non-CLI mode logs error on failure", func(t *testing.T) {
|
||||
logBuffer := &bytes.Buffer{}
|
||||
oldLogger := log.Default()
|
||||
log.SetOutput(logBuffer)
|
||||
defer log.SetOutput(oldLogger.Writer())
|
||||
|
||||
SetAutoMaxProcs(false)
|
||||
|
||||
assert.NotContains(t, logBuffer.String(), "Error setting GOMAXPROCS", "Unexpected log output detected")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
@@ -44,11 +45,10 @@ func NewConnection(address string) (*grpc.ClientConn, error) {
|
||||
}
|
||||
unaryInterceptors := []grpc.UnaryClientInterceptor{grpc_retry.UnaryClientInterceptor(retryOpts...)}
|
||||
dialOpts := []grpc.DialOption{
|
||||
grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)),
|
||||
grpc.WithStreamInterceptor(grpc_util.RetryOnlyForServerStreamInterceptor(retryOpts...)),
|
||||
grpc.WithChainUnaryInterceptor(unaryInterceptors...),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxGRPCMessageSize), grpc.MaxCallSendMsgSize(MaxGRPCMessageSize)),
|
||||
grpc.WithUnaryInterceptor(grpc_util.OTELUnaryClientInterceptor()),
|
||||
grpc.WithStreamInterceptor(grpc_util.OTELStreamClientInterceptor()),
|
||||
grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
|
||||
}
|
||||
|
||||
dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
|
||||
@@ -49,13 +49,11 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
|
||||
|
||||
serverLog := log.NewEntry(log.StandardLogger())
|
||||
streamInterceptors := []grpc.StreamServerInterceptor{
|
||||
otelgrpc.StreamServerInterceptor(), //nolint:staticcheck // TODO: ignore SA1019 for depreciation: see https://github.com/argoproj/argo-cd/issues/18258
|
||||
logging.StreamServerInterceptor(grpc_util.InterceptorLogger(serverLog)),
|
||||
serverMetrics.StreamServerInterceptor(),
|
||||
recovery.StreamServerInterceptor(recovery.WithRecoveryHandler(grpc_util.LoggerRecoveryHandler(serverLog))),
|
||||
}
|
||||
unaryInterceptors := []grpc.UnaryServerInterceptor{
|
||||
otelgrpc.UnaryServerInterceptor(), //nolint:staticcheck // TODO: ignore SA1019 for depreciation: see https://github.com/argoproj/argo-cd/issues/18258
|
||||
logging.UnaryServerInterceptor(grpc_util.InterceptorLogger(serverLog)),
|
||||
serverMetrics.UnaryServerInterceptor(),
|
||||
recovery.UnaryServerInterceptor(recovery.WithRecoveryHandler(grpc_util.LoggerRecoveryHandler(serverLog))),
|
||||
@@ -71,6 +69,7 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
|
||||
MinTime: common.GetGRPCKeepAliveEnforcementMinimum(),
|
||||
},
|
||||
),
|
||||
grpc.StatsHandler(otelgrpc.NewServerHandler()),
|
||||
}
|
||||
|
||||
return &ArgoCDCMPServer{
|
||||
|
||||
@@ -118,10 +118,21 @@ func (s *Service) handleCommitRequest(logCtx *log.Entry, r *apiclient.CommitHydr
|
||||
return out, "", fmt.Errorf("failed to checkout target branch: %w", err)
|
||||
}
|
||||
|
||||
logCtx.Debug("Clearing repo contents")
|
||||
out, err = gitClient.RemoveContents()
|
||||
if err != nil {
|
||||
return out, "", fmt.Errorf("failed to clear repo: %w", err)
|
||||
logCtx.Debug("Clearing and preparing paths")
|
||||
var pathsToClear []string
|
||||
for _, p := range r.Paths {
|
||||
if p.Path == "" || p.Path == "." {
|
||||
logCtx.Debug("Using root directory for manifests, no directory removal needed")
|
||||
} else {
|
||||
pathsToClear = append(pathsToClear, p.Path)
|
||||
}
|
||||
}
|
||||
if len(pathsToClear) > 0 {
|
||||
logCtx.Debugf("Clearing paths: %v", pathsToClear)
|
||||
out, err := gitClient.RemoveContents(pathsToClear)
|
||||
if err != nil {
|
||||
return out, "", fmt.Errorf("failed to clear paths %v: %w", pathsToClear, err)
|
||||
}
|
||||
}
|
||||
|
||||
logCtx.Debug("Writing manifests")
|
||||
@@ -220,6 +231,7 @@ type hydratorMetadataFile struct {
|
||||
// Subject is the subject line of the DRY commit message, i.e. `git show --format=%s`.
|
||||
Subject string `json:"subject,omitempty"`
|
||||
// Body is the body of the DRY commit message, excluding the subject line, i.e. `git show --format=%b`.
|
||||
// Known Argocd- trailers with valid values are removed, but all other trailers are kept.
|
||||
Body string `json:"body,omitempty"`
|
||||
References []v1alpha1.RevisionReference `json:"references,omitempty"`
|
||||
}
|
||||
|
||||
@@ -99,7 +99,6 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("RemoveContents").Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("it-worked!", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
@@ -109,6 +108,178 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "it-worked!", resp.HydratedSha)
|
||||
})
|
||||
|
||||
t.Run("root path with dot and blank - no directory removal", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
service, mockRepoClientFactory := newServiceWithMocks(t)
|
||||
mockGitClient := gitmocks.NewClient(t)
|
||||
mockGitClient.On("Init").Return(nil).Once()
|
||||
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("root-and-blank-sha", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
|
||||
requestWithRootAndBlank := &apiclient.CommitHydratedManifestsRequest{
|
||||
Repo: &v1alpha1.Repository{
|
||||
Repo: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
Paths: []*apiclient.PathDetails{
|
||||
{
|
||||
Path: ".",
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-dot"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "",
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-blank"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithRootAndBlank)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "root-and-blank-sha", resp.HydratedSha)
|
||||
})
|
||||
|
||||
t.Run("subdirectory path - triggers directory removal", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
service, mockRepoClientFactory := newServiceWithMocks(t)
|
||||
mockGitClient := gitmocks.NewClient(t)
|
||||
mockGitClient.On("Init").Return(nil).Once()
|
||||
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("RemoveContents", []string{"apps/staging"}).Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("subdir-path-sha", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
|
||||
requestWithSubdirPath := &apiclient.CommitHydratedManifestsRequest{
|
||||
Repo: &v1alpha1.Repository{
|
||||
Repo: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
Paths: []*apiclient.PathDetails{
|
||||
{
|
||||
Path: "apps/staging", // subdirectory path
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"test-app"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithSubdirPath)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "subdir-path-sha", resp.HydratedSha)
|
||||
})
|
||||
|
||||
t.Run("mixed paths - root and subdirectory", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
service, mockRepoClientFactory := newServiceWithMocks(t)
|
||||
mockGitClient := gitmocks.NewClient(t)
|
||||
mockGitClient.On("Init").Return(nil).Once()
|
||||
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("RemoveContents", []string{"apps/production", "apps/staging"}).Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("mixed-paths-sha", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
|
||||
requestWithMixedPaths := &apiclient.CommitHydratedManifestsRequest{
|
||||
Repo: &v1alpha1.Repository{
|
||||
Repo: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
Paths: []*apiclient.PathDetails{
|
||||
{
|
||||
Path: ".", // root path - should NOT trigger removal
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"global-config"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "apps/production", // subdirectory path - SHOULD trigger removal
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"prod-app"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "apps/staging", // another subdirectory path - SHOULD trigger removal
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"staging-app"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithMixedPaths)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "mixed-paths-sha", resp.HydratedSha)
|
||||
})
|
||||
|
||||
t.Run("empty paths array", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
service, mockRepoClientFactory := newServiceWithMocks(t)
|
||||
mockGitClient := gitmocks.NewClient(t)
|
||||
mockGitClient.On("Init").Return(nil).Once()
|
||||
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("it-worked!", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
|
||||
requestWithEmptyPaths := &apiclient.CommitHydratedManifestsRequest{
|
||||
Repo: &v1alpha1.Repository{
|
||||
Repo: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithEmptyPaths)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "it-worked!", resp.HydratedSha)
|
||||
})
|
||||
}
|
||||
|
||||
func newServiceWithMocks(t *testing.T) (*Service, *mocks.RepoClientFactory) {
|
||||
|
||||
@@ -2,9 +2,7 @@ package commit
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -17,12 +15,17 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/git"
|
||||
"github.com/argoproj/argo-cd/v3/util/io"
|
||||
)
|
||||
|
||||
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
|
||||
|
||||
const gitAttributesContents = `*/README.md linguist-generated=true
|
||||
*/hydrator.metadata linguist-generated=true`
|
||||
|
||||
func init() {
|
||||
// Avoid allowing the user to learn things about the environment.
|
||||
delete(sprigFuncMap, "env")
|
||||
@@ -48,19 +51,27 @@ func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *app
|
||||
|
||||
subject, body, _ := strings.Cut(message, "\n\n")
|
||||
|
||||
_, bodyMinusTrailers := git.GetReferences(log.WithFields(log.Fields{"repo": repoUrl, "revision": drySha}), body)
|
||||
|
||||
// Write the top-level readme.
|
||||
err := writeMetadata(root, "", hydratorMetadataFile{DrySHA: drySha, RepoURL: repoUrl, Author: author, Subject: subject, Body: body, Date: date, References: references})
|
||||
err := writeMetadata(root, "", hydratorMetadataFile{DrySHA: drySha, RepoURL: repoUrl, Author: author, Subject: subject, Body: bodyMinusTrailers, Date: date, References: references})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write top-level hydrator metadata: %w", err)
|
||||
}
|
||||
|
||||
// Write .gitattributes
|
||||
err = writeGitAttributes(root)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write git attributes: %w", err)
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
hydratePath := p.Path
|
||||
if hydratePath == "." {
|
||||
hydratePath = ""
|
||||
}
|
||||
|
||||
err = mkdirAll(root, hydratePath)
|
||||
err = root.MkdirAll(hydratePath, 0o755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create path: %w", err)
|
||||
}
|
||||
@@ -134,6 +145,30 @@ func writeReadme(root *os.Root, dirPath string, metadata hydratorMetadataFile) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeGitAttributes(root *os.Root) error {
|
||||
gitAttributesFile, err := root.Create(".gitattributes")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create git attributes file: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = gitAttributesFile.Close()
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
common.SecurityField: common.SecurityMedium,
|
||||
common.SecurityCWEField: common.SecurityCWEMissingReleaseOfFileDescriptor,
|
||||
}).Errorf("error closing file %q: %v", gitAttributesFile.Name(), err)
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = gitAttributesFile.WriteString(gitAttributesContents)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write git attributes: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeManifests writes the manifests to the manifest.yaml file, truncating the file if it exists and appending the
|
||||
// manifests in the order they are provided.
|
||||
func writeManifests(root *os.Root, dirPath string, manifests []*apiclient.HydratedManifestDetails) error {
|
||||
@@ -175,25 +210,3 @@ func writeManifests(root *os.Root, dirPath string, manifests []*apiclient.Hydrat
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// mkdirAll creates the directory and all its parents if they do not exist. It returns an error if the directory
|
||||
// cannot be.
|
||||
func mkdirAll(root *os.Root, dirPath string) error {
|
||||
parts := strings.Split(dirPath, string(os.PathSeparator))
|
||||
builtPath := ""
|
||||
for _, part := range parts {
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
builtPath = filepath.Join(builtPath, part)
|
||||
err := root.Mkdir(builtPath, os.ModePerm)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrExist) {
|
||||
log.WithError(err).Warnf("path %s already exists, skipping", dirPath)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("failed to create path: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -73,9 +72,13 @@ func TestWriteForPaths(t *testing.T) {
|
||||
|
||||
now := metav1.NewTime(time.Now())
|
||||
metadata := &appsv1.RevisionMetadata{
|
||||
Author: "test-author",
|
||||
Date: &now,
|
||||
Message: "test-message",
|
||||
Author: "test-author",
|
||||
Date: &now,
|
||||
Message: `test-message
|
||||
|
||||
Signed-off-by: Test User <test@example.com>
|
||||
Argocd-reference-commit-sha: abc123
|
||||
`,
|
||||
References: []appsv1.RevisionReference{
|
||||
{
|
||||
Commit: &appsv1.CommitMetadata{
|
||||
@@ -97,16 +100,15 @@ func TestWriteForPaths(t *testing.T) {
|
||||
topMetadataBytes, err := os.ReadFile(topMetadataPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedSubject, expectedBody, _ := strings.Cut(metadata.Message, "\n\n")
|
||||
|
||||
var topMetadata hydratorMetadataFile
|
||||
err = json.Unmarshal(topMetadataBytes, &topMetadata)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, repoURL, topMetadata.RepoURL)
|
||||
assert.Equal(t, drySha, topMetadata.DrySHA)
|
||||
assert.Equal(t, metadata.Author, topMetadata.Author)
|
||||
assert.Equal(t, expectedSubject, topMetadata.Subject)
|
||||
assert.Equal(t, expectedBody, topMetadata.Body)
|
||||
assert.Equal(t, "test-message", topMetadata.Subject)
|
||||
// The body should exclude the Argocd- trailers.
|
||||
assert.Equal(t, "Signed-off-by: Test User <test@example.com>\n", topMetadata.Body)
|
||||
assert.Equal(t, metadata.Date.Format(time.RFC3339), topMetadata.Date)
|
||||
assert.Equal(t, metadata.References, topMetadata.References)
|
||||
|
||||
@@ -221,3 +223,16 @@ func TestWriteManifests(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(manifestBytes), "kind")
|
||||
}
|
||||
|
||||
func TestWriteGitAttributes(t *testing.T) {
|
||||
root := tempRoot(t)
|
||||
|
||||
err := writeGitAttributes(root)
|
||||
require.NoError(t, err)
|
||||
|
||||
gitAttributesPath := filepath.Join(root.Name(), ".gitattributes")
|
||||
gitAttributesBytes, err := os.ReadFile(gitAttributesPath)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/README.md linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/hydrator.metadata linguist-generated=true")
|
||||
}
|
||||
|
||||
@@ -100,6 +100,12 @@ const (
|
||||
PluginConfigFileName = "plugin.yaml"
|
||||
)
|
||||
|
||||
// consts for podrequests metrics in cache/info
|
||||
const (
|
||||
PodRequestsCPU = "cpu"
|
||||
PodRequestsMEM = "memory"
|
||||
)
|
||||
|
||||
// Argo CD application related constants
|
||||
const (
|
||||
|
||||
@@ -186,6 +192,8 @@ const (
|
||||
LabelValueSecretTypeRepoCreds = "repo-creds"
|
||||
// LabelValueSecretTypeRepositoryWrite indicates a secret type of repository credentials for writing
|
||||
LabelValueSecretTypeRepositoryWrite = "repository-write"
|
||||
// LabelValueSecretTypeRepoCredsWrite indicates a secret type of repository credentials for writing for templating
|
||||
LabelValueSecretTypeRepoCredsWrite = "repo-write-creds"
|
||||
// LabelValueSecretTypeSCMCreds indicates a secret type of SCM credentials
|
||||
LabelValueSecretTypeSCMCreds = "scm-creds"
|
||||
|
||||
|
||||
82
common/version_test.go
Normal file
82
common/version_test.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inputGitCommit string
|
||||
inputGitTag string
|
||||
inputTreeState string
|
||||
inputVersion string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Official release with tag and clean state",
|
||||
inputGitCommit: "abcdef123456",
|
||||
inputGitTag: "v1.2.3",
|
||||
inputTreeState: "clean",
|
||||
inputVersion: "1.2.3",
|
||||
expected: "v1.2.3",
|
||||
},
|
||||
{
|
||||
name: "Dirty state with commit",
|
||||
inputGitCommit: "deadbeefcafebabe",
|
||||
inputGitTag: "",
|
||||
inputTreeState: "dirty",
|
||||
inputVersion: "2.0.1",
|
||||
expected: "v2.0.1+deadbee.dirty",
|
||||
},
|
||||
{
|
||||
name: "Clean state with commit, no tag",
|
||||
inputGitCommit: "cafebabedeadbeef",
|
||||
inputGitTag: "",
|
||||
inputTreeState: "clean",
|
||||
inputVersion: "2.1.0",
|
||||
expected: "v2.1.0+cafebab",
|
||||
},
|
||||
{
|
||||
name: "Missing commit and tag",
|
||||
inputGitCommit: "",
|
||||
inputGitTag: "",
|
||||
inputTreeState: "clean",
|
||||
inputVersion: "3.1.0",
|
||||
expected: "v3.1.0+unknown",
|
||||
},
|
||||
{
|
||||
name: "Short commit",
|
||||
inputGitCommit: "abc",
|
||||
inputGitTag: "",
|
||||
inputTreeState: "clean",
|
||||
inputVersion: "4.0.0",
|
||||
expected: "v4.0.0+unknown",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
gitCommit = tt.inputGitCommit
|
||||
gitTag = tt.inputGitTag
|
||||
gitTreeState = tt.inputTreeState
|
||||
version = tt.inputVersion
|
||||
|
||||
buildDate = "2025-06-26"
|
||||
kubectlVersion = "v1.30.0"
|
||||
extraBuildInfo = "test-build"
|
||||
|
||||
got := GetVersion()
|
||||
assert.Equal(t, tt.expected, got.Version)
|
||||
assert.Equal(t, buildDate, got.BuildDate)
|
||||
assert.Equal(t, tt.inputGitCommit, got.GitCommit)
|
||||
assert.Equal(t, tt.inputGitTag, got.GitTag)
|
||||
assert.Equal(t, tt.inputTreeState, got.GitTreeState)
|
||||
assert.Equal(t, runtime.Version(), got.GoVersion)
|
||||
assert.Equal(t, runtime.Compiler, got.Compiler)
|
||||
assert.Equal(t, runtime.GOOS+"/"+runtime.GOARCH, got.Platform)
|
||||
assert.Equal(t, kubectlVersion, got.KubectlVersion)
|
||||
assert.Equal(t, extraBuildInfo, got.ExtraBuildInfo)
|
||||
}
|
||||
}
|
||||
@@ -47,6 +47,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
statecache "github.com/argoproj/argo-cd/v3/controller/cache"
|
||||
"github.com/argoproj/argo-cd/v3/controller/hydrator"
|
||||
hydratortypes "github.com/argoproj/argo-cd/v3/controller/hydrator/types"
|
||||
"github.com/argoproj/argo-cd/v3/controller/metrics"
|
||||
"github.com/argoproj/argo-cd/v3/controller/sharding"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
|
||||
@@ -115,7 +116,7 @@ type ApplicationController struct {
|
||||
appOperationQueue workqueue.TypedRateLimitingInterface[string]
|
||||
projectRefreshQueue workqueue.TypedRateLimitingInterface[string]
|
||||
appHydrateQueue workqueue.TypedRateLimitingInterface[string]
|
||||
hydrationQueue workqueue.TypedRateLimitingInterface[hydrator.HydrationQueueKey]
|
||||
hydrationQueue workqueue.TypedRateLimitingInterface[hydratortypes.HydrationQueueKey]
|
||||
appInformer cache.SharedIndexInformer
|
||||
appLister applisters.ApplicationLister
|
||||
projInformer cache.SharedIndexInformer
|
||||
@@ -125,7 +126,7 @@ type ApplicationController struct {
|
||||
statusHardRefreshTimeout time.Duration
|
||||
statusRefreshJitter time.Duration
|
||||
selfHealTimeout time.Duration
|
||||
selfHealBackOff *wait.Backoff
|
||||
selfHealBackoff *wait.Backoff
|
||||
selfHealBackoffCooldown time.Duration
|
||||
syncTimeout time.Duration
|
||||
db db.ArgoDB
|
||||
@@ -198,7 +199,7 @@ func NewApplicationController(
|
||||
projectRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "project_reconciliation_queue"}),
|
||||
appComparisonTypeRefreshQueue: workqueue.NewTypedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig)),
|
||||
appHydrateQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_hydration_queue"}),
|
||||
hydrationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[hydrator.HydrationQueueKey](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[hydrator.HydrationQueueKey]{Name: "manifest_hydration_queue"}),
|
||||
hydrationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[hydratortypes.HydrationQueueKey](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[hydratortypes.HydrationQueueKey]{Name: "manifest_hydration_queue"}),
|
||||
db: db,
|
||||
statusRefreshTimeout: appResyncPeriod,
|
||||
statusHardRefreshTimeout: appHardResyncPeriod,
|
||||
@@ -208,7 +209,7 @@ func NewApplicationController(
|
||||
auditLogger: argo.NewAuditLogger(kubeClientset, common.ApplicationController, enableK8sEvent),
|
||||
settingsMgr: settingsMgr,
|
||||
selfHealTimeout: selfHealTimeout,
|
||||
selfHealBackOff: selfHealBackoff,
|
||||
selfHealBackoff: selfHealBackoff,
|
||||
selfHealBackoffCooldown: selfHealBackoffCooldown,
|
||||
syncTimeout: syncTimeout,
|
||||
clusterSharding: clusterSharding,
|
||||
@@ -328,7 +329,7 @@ func NewApplicationController(
|
||||
}
|
||||
}
|
||||
stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterSharding, argo.NewResourceTracking())
|
||||
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.onKubectlRun, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
|
||||
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.onKubectlRun, ctrl.settingsMgr, stateCache, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
|
||||
ctrl.appInformer = appInformer
|
||||
ctrl.appLister = appLister
|
||||
ctrl.projInformer = projInformer
|
||||
@@ -602,6 +603,9 @@ func (ctrl *ApplicationController) getResourceTree(destCluster *appv1.Cluster, a
|
||||
Group: managedResource.Group,
|
||||
Namespace: managedResource.Namespace,
|
||||
},
|
||||
Health: &appv1.HealthStatus{
|
||||
Status: health.HealthStatusMissing,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
managedResourcesKeys = append(managedResourcesKeys, kube.GetResourceKey(live))
|
||||
@@ -1391,16 +1395,23 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
|
||||
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
|
||||
logCtx.Debug("Finished processing requested app operation")
|
||||
}()
|
||||
terminating := false
|
||||
terminatingCause := ""
|
||||
if isOperationInProgress(app) {
|
||||
state = app.Status.OperationState.DeepCopy()
|
||||
terminating = state.Phase == synccommon.OperationTerminating
|
||||
// Failed operation with retry strategy might have be in-progress and has completion time
|
||||
switch {
|
||||
case state.FinishedAt != nil && !terminating:
|
||||
case state.Phase == synccommon.OperationTerminating:
|
||||
logCtx.Infof("Resuming in-progress operation. phase: %s, message: %s", state.Phase, state.Message)
|
||||
case ctrl.syncTimeout != time.Duration(0) && time.Now().After(state.StartedAt.Add(ctrl.syncTimeout)):
|
||||
state.Phase = synccommon.OperationTerminating
|
||||
state.Message = "operation is terminating due to timeout"
|
||||
terminatingCause = "controller sync timeout"
|
||||
ctrl.setOperationState(app, state)
|
||||
logCtx.Infof("Terminating in-progress operation due to timeout. Started at: %v, timeout: %v", state.StartedAt, ctrl.syncTimeout)
|
||||
case state.Phase == synccommon.OperationRunning && state.FinishedAt != nil:
|
||||
// Failed operation with retry strategy might be in-progress and has completion time
|
||||
retryAt, err := app.Status.OperationState.Operation.Retry.NextRetryAt(state.FinishedAt.Time, state.RetryCount)
|
||||
if err != nil {
|
||||
state.Phase = synccommon.OperationFailed
|
||||
state.Phase = synccommon.OperationError
|
||||
state.Message = err.Error()
|
||||
ctrl.setOperationState(app, state)
|
||||
return
|
||||
@@ -1411,22 +1422,18 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
|
||||
return
|
||||
}
|
||||
// retrying operation. remove previous failure time in app since it is used as a trigger
|
||||
// that previous failed and operation should be retried
|
||||
state.FinishedAt = nil
|
||||
ctrl.setOperationState(app, state)
|
||||
// Get rid of sync results and null out previous operation completion time
|
||||
// This will start the retry attempt
|
||||
state.Message = fmt.Sprintf("Retrying operation. Attempt #%d", state.RetryCount)
|
||||
state.FinishedAt = nil
|
||||
state.SyncResult = nil
|
||||
case ctrl.syncTimeout != time.Duration(0) && time.Now().After(state.StartedAt.Add(ctrl.syncTimeout)) && !terminating:
|
||||
state.Phase = synccommon.OperationTerminating
|
||||
state.Message = "operation is terminating due to timeout"
|
||||
ctrl.setOperationState(app, state)
|
||||
logCtx.Infof("Terminating in-progress operation due to timeout. Started at: %v, timeout: %v", state.StartedAt, ctrl.syncTimeout)
|
||||
logCtx.Infof("Retrying operation. Attempt #%d", state.RetryCount)
|
||||
default:
|
||||
logCtx.Infof("Resuming in-progress operation. phase: %s, message: %s", state.Phase, state.Message)
|
||||
}
|
||||
} else {
|
||||
state = &appv1.OperationState{Phase: synccommon.OperationRunning, Operation: *app.Operation, StartedAt: metav1.Now()}
|
||||
state = NewOperationState(*app.Operation)
|
||||
ctrl.setOperationState(app, state)
|
||||
if ctrl.syncTimeout != time.Duration(0) {
|
||||
// Schedule a check during which the timeout would be checked.
|
||||
@@ -1436,22 +1443,16 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
|
||||
}
|
||||
ts.AddCheckpoint("initial_operation_stage_ms")
|
||||
|
||||
// Call GetDestinationCluster to validate the destination cluster.
|
||||
if _, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, ctrl.db); err != nil {
|
||||
state.Phase = synccommon.OperationFailed
|
||||
state.Message = err.Error()
|
||||
terminating := state.Phase == synccommon.OperationTerminating
|
||||
project, err := ctrl.getAppProj(app)
|
||||
if err == nil {
|
||||
// Start or resume the sync
|
||||
ctrl.appStateManager.SyncAppState(app, project, state)
|
||||
} else {
|
||||
ctrl.appStateManager.SyncAppState(app, state)
|
||||
}
|
||||
ts.AddCheckpoint("validate_and_sync_app_state_ms")
|
||||
|
||||
// Check whether application is allowed to use project
|
||||
_, err := ctrl.getAppProj(app)
|
||||
ts.AddCheckpoint("get_app_proj_ms")
|
||||
if err != nil {
|
||||
state.Phase = synccommon.OperationError
|
||||
state.Message = err.Error()
|
||||
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
|
||||
}
|
||||
ts.AddCheckpoint("sync_app_state_ms")
|
||||
|
||||
switch state.Phase {
|
||||
case synccommon.OperationRunning:
|
||||
@@ -1459,12 +1460,6 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
|
||||
// to clobber the Terminated state with Running. Get the latest app state to check for this.
|
||||
freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
// App may have lost permissions to use the project meanwhile.
|
||||
_, err = ctrl.getAppProj(freshApp)
|
||||
if err != nil {
|
||||
state.Phase = synccommon.OperationFailed
|
||||
state.Message = fmt.Sprintf("operation not allowed: %v", err)
|
||||
}
|
||||
if freshApp.Status.OperationState != nil && freshApp.Status.OperationState.Phase == synccommon.OperationTerminating {
|
||||
state.Phase = synccommon.OperationTerminating
|
||||
state.Message = "operation is terminating"
|
||||
@@ -1476,17 +1471,24 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
|
||||
case synccommon.OperationFailed, synccommon.OperationError:
|
||||
if !terminating && (state.RetryCount < state.Operation.Retry.Limit || state.Operation.Retry.Limit < 0) {
|
||||
now := metav1.Now()
|
||||
state.FinishedAt = &now
|
||||
if retryAt, err := state.Operation.Retry.NextRetryAt(now.Time, state.RetryCount); err != nil {
|
||||
state.Phase = synccommon.OperationFailed
|
||||
state.Phase = synccommon.OperationError
|
||||
state.Message = fmt.Sprintf("%s (failed to retry: %v)", state.Message, err)
|
||||
} else {
|
||||
// Set FinishedAt explicitly on a Running phase. This is a unique condition that will allow this
|
||||
// function to perform a retry the next time the operation is processed.
|
||||
state.Phase = synccommon.OperationRunning
|
||||
state.FinishedAt = &now
|
||||
state.RetryCount++
|
||||
state.Message = fmt.Sprintf("%s. Retrying attempt #%d at %s.", state.Message, state.RetryCount, retryAt.Format(time.Kitchen))
|
||||
}
|
||||
} else if state.RetryCount > 0 {
|
||||
state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)
|
||||
} else {
|
||||
if terminating && terminatingCause != "" {
|
||||
state.Message = fmt.Sprintf("%s, triggered by %s", state.Message, terminatingCause)
|
||||
}
|
||||
if state.RetryCount > 0 {
|
||||
state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1760,7 +1762,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
}
|
||||
|
||||
compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources, refreshType == appv1.RefreshTypeHard, comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources, false)
|
||||
compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources, refreshType == appv1.RefreshTypeHard, comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources)
|
||||
|
||||
ts.AddCheckpoint("compare_app_state_ms")
|
||||
|
||||
@@ -1786,7 +1788,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
|
||||
canSync, _ := project.Spec.SyncWindows.Matches(app).CanSync(false)
|
||||
if canSync {
|
||||
syncErrCond, opDuration := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionUpdated)
|
||||
syncErrCond, opDuration := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionsMayHaveChanges)
|
||||
setOpDuration = opDuration
|
||||
if syncErrCond != nil {
|
||||
app.Status.SetConditions(
|
||||
@@ -2081,7 +2083,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
}
|
||||
|
||||
// autoSync will initiate a sync operation for an application configured with automated sync
|
||||
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus, revisionUpdated bool) (*appv1.ApplicationCondition, time.Duration) {
|
||||
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus, shouldCompareRevisions bool) (*appv1.ApplicationCondition, time.Duration) {
|
||||
logCtx := log.WithFields(applog.GetAppLogFields(app))
|
||||
ts := stats.NewTimingStats()
|
||||
defer func() {
|
||||
@@ -2125,65 +2127,66 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
}
|
||||
}
|
||||
|
||||
selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
|
||||
// Multi-Source Apps with selfHeal disabled should not trigger an autosync if
|
||||
// the last sync revision and the new sync revision is the same.
|
||||
if app.Spec.HasMultipleSources() && !selfHeal && reflect.DeepEqual(app.Status.Sync.Revisions, syncStatus.Revisions) {
|
||||
logCtx.Infof("Skipping auto-sync: selfHeal disabled and sync caused by object update")
|
||||
return nil, 0
|
||||
desiredRevisions := []string{syncStatus.Revision}
|
||||
if app.Spec.HasMultipleSources() {
|
||||
desiredRevisions = syncStatus.Revisions
|
||||
}
|
||||
|
||||
desiredCommitSHA := syncStatus.Revision
|
||||
desiredCommitSHAsMS := syncStatus.Revisions
|
||||
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources(), revisionUpdated)
|
||||
ts.AddCheckpoint("already_attempted_sync_ms")
|
||||
op := appv1.Operation{
|
||||
Sync: &appv1.SyncOperation{
|
||||
Revision: desiredCommitSHA,
|
||||
Revision: syncStatus.Revision,
|
||||
Prune: app.Spec.SyncPolicy.Automated.Prune,
|
||||
SyncOptions: app.Spec.SyncPolicy.SyncOptions,
|
||||
Revisions: desiredCommitSHAsMS,
|
||||
Revisions: syncStatus.Revisions,
|
||||
},
|
||||
InitiatedBy: appv1.OperationInitiator{Automated: true},
|
||||
Retry: appv1.RetryStrategy{Limit: 5},
|
||||
}
|
||||
|
||||
if app.Spec.SyncPolicy.Retry != nil {
|
||||
op.Retry = *app.Spec.SyncPolicy.Retry
|
||||
}
|
||||
|
||||
// It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g.
|
||||
// auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an
|
||||
// application in an infinite loop. To detect this, we only attempt the Sync if the revision
|
||||
// and parameter overrides are different from our most recent sync operation.
|
||||
if alreadyAttempted && (!selfHeal || !attemptPhase.Successful()) {
|
||||
if !attemptPhase.Successful() {
|
||||
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA)
|
||||
message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message)
|
||||
alreadyAttempted, lastAttemptedRevisions, lastAttemptedPhase := alreadyAttemptedSync(app, desiredRevisions, shouldCompareRevisions)
|
||||
ts.AddCheckpoint("already_attempted_sync_ms")
|
||||
if alreadyAttempted {
|
||||
if !lastAttemptedPhase.Successful() {
|
||||
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s and will not retry for %s", lastAttemptedRevisions, desiredRevisions)
|
||||
message := fmt.Sprintf("Failed last sync attempt to %s: %s", lastAttemptedRevisions, app.Status.OperationState.Message)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
|
||||
}
|
||||
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
|
||||
return nil, 0
|
||||
} else if selfHeal {
|
||||
shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app, alreadyAttempted)
|
||||
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
|
||||
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
|
||||
if !app.Spec.SyncPolicy.Automated.SelfHeal {
|
||||
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredRevisions)
|
||||
return nil, 0
|
||||
}
|
||||
// Self heal will trigger a new sync operation when the desired state changes and cause the application to
|
||||
// be OutOfSync when it was previously synced Successfully. This means SelfHeal should only ever be attempted
|
||||
// when the revisions have not changed, and where the previous sync to these revision was successful
|
||||
|
||||
// Only carry SelfHealAttemptsCount to be increased when the selfHealBackoffCooldown has not elapsed yet
|
||||
if !ctrl.selfHealBackoffCooldownElapsed(app) {
|
||||
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
|
||||
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
|
||||
}
|
||||
}
|
||||
|
||||
if alreadyAttempted {
|
||||
if !shouldSelfHeal {
|
||||
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
|
||||
return nil, 0
|
||||
}
|
||||
op.Sync.SelfHealAttemptsCount++
|
||||
for _, resource := range resources {
|
||||
if resource.Status != appv1.SyncStatusCodeSynced {
|
||||
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
|
||||
Kind: resource.Kind,
|
||||
Group: resource.Group,
|
||||
Name: resource.Name,
|
||||
})
|
||||
}
|
||||
if remainingTime := ctrl.selfHealRemainingBackoff(app, int(op.Sync.SelfHealAttemptsCount)); remainingTime > 0 {
|
||||
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", lastAttemptedRevisions, ctrl.selfHealTimeout, remainingTime)
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &remainingTime)
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
op.Sync.SelfHealAttemptsCount++
|
||||
for _, resource := range resources {
|
||||
if resource.Status != appv1.SyncStatusCodeSynced {
|
||||
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
|
||||
Kind: resource.Kind,
|
||||
Group: resource.Group,
|
||||
Name: resource.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2197,7 +2200,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
}
|
||||
}
|
||||
if bAllNeedPrune {
|
||||
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA)
|
||||
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredRevisions)
|
||||
logCtx.Warn(message)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
|
||||
}
|
||||
@@ -2213,62 +2216,65 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
if stderrors.Is(err, argo.ErrAnotherOperationInProgress) {
|
||||
// skipping auto-sync because another operation is in progress and was not noticed due to stale data in informer
|
||||
// it is safe to skip auto-sync because it is already running
|
||||
logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
|
||||
logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredRevisions, err)
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
|
||||
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredRevisions, err)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
|
||||
}
|
||||
ctrl.writeBackToInformer(updatedApp)
|
||||
ts.AddCheckpoint("write_back_to_informer_ms")
|
||||
|
||||
var target string
|
||||
if updatedApp.Spec.HasMultipleSources() {
|
||||
target = strings.Join(desiredCommitSHAsMS, ", ")
|
||||
} else {
|
||||
target = desiredCommitSHA
|
||||
}
|
||||
message := fmt.Sprintf("Initiated automated sync to '%s'", target)
|
||||
message := fmt.Sprintf("Initiated automated sync to %s", desiredRevisions)
|
||||
ctrl.logAppEvent(context.TODO(), app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: corev1.EventTypeNormal}, message)
|
||||
logCtx.Info(message)
|
||||
return nil, setOpTime
|
||||
}
|
||||
|
||||
// alreadyAttemptedSync returns whether the most recent sync was performed against the
|
||||
// commitSHA and with the same app source config which are currently set in the app.
|
||||
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool, revisionUpdated bool) (bool, synccommon.OperationPhase) {
|
||||
if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
|
||||
return false, ""
|
||||
// alreadyAttemptedSync returns whether the most recently synced revision(s) exactly match the given desiredRevisions
|
||||
// and for the same application source. If the revision(s) have changed or the Application source configuration has been updated,
|
||||
// it will return false, indicating that a new sync should be attempted.
|
||||
// When newRevisionHasChanges is false, due to commits not having direct changes on the application, it will not compare the revision(s), but only the sources.
|
||||
// It also returns the last synced revisions if any, and the result of that last sync operation.
|
||||
func alreadyAttemptedSync(app *appv1.Application, desiredRevisions []string, newRevisionHasChanges bool) (bool, []string, synccommon.OperationPhase) {
|
||||
if app.Status.OperationState == nil {
|
||||
// The operation state may be removed when new operations are triggered
|
||||
return false, []string{}, ""
|
||||
}
|
||||
if hasMultipleSources {
|
||||
if revisionUpdated {
|
||||
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
|
||||
return false, ""
|
||||
if app.Status.OperationState.SyncResult == nil {
|
||||
// If the sync has completed without result, it is very likely that an error happened
|
||||
// We don't want to resync with auto-sync indefinitely. We should have retried the configured amount of time already
|
||||
// In this case, a manual action to restore the app may be required
|
||||
log.WithFields(applog.GetAppLogFields(app)).Warn("Already attempted sync: sync does not have any results")
|
||||
return app.Status.OperationState.Phase.Completed(), []string{}, app.Status.OperationState.Phase
|
||||
}
|
||||
|
||||
if newRevisionHasChanges {
|
||||
log.WithFields(applog.GetAppLogFields(app)).Infof("Already attempted sync: comparing synced revisions to %s", desiredRevisions)
|
||||
if app.Spec.HasMultipleSources() {
|
||||
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, desiredRevisions) {
|
||||
return false, app.Status.OperationState.SyncResult.Revisions, app.Status.OperationState.Phase
|
||||
}
|
||||
} else {
|
||||
log.WithFields(applog.GetAppLogFields(app)).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
|
||||
if len(desiredRevisions) != 1 || app.Status.OperationState.SyncResult.Revision != desiredRevisions[0] {
|
||||
return false, []string{app.Status.OperationState.SyncResult.Revision}, app.Status.OperationState.Phase
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if revisionUpdated {
|
||||
log.WithFields(applog.GetAppLogFields(app)).Infof("Executing compare of syncResult.Revision and commitSha because manifest changed: %v", commitSHA)
|
||||
if app.Status.OperationState.SyncResult.Revision != commitSHA {
|
||||
return false, ""
|
||||
}
|
||||
} else {
|
||||
log.WithFields(applog.GetAppLogFields(app)).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
|
||||
}
|
||||
log.WithFields(applog.GetAppLogFields(app)).Debugf("Already attempted sync: revisions %s have no changes", desiredRevisions)
|
||||
}
|
||||
|
||||
if hasMultipleSources {
|
||||
return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.Phase
|
||||
log.WithFields(applog.GetAppLogFields(app)).Debug("Already attempted sync: comparing sources")
|
||||
if app.Spec.HasMultipleSources() {
|
||||
return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.SyncResult.Revisions, app.Status.OperationState.Phase
|
||||
}
|
||||
return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase
|
||||
return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), []string{app.Status.OperationState.SyncResult.Revision}, app.Status.OperationState.Phase
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application, alreadyAttempted bool) (bool, time.Duration) {
|
||||
func (ctrl *ApplicationController) selfHealRemainingBackoff(app *appv1.Application, selfHealAttemptsCount int) time.Duration {
|
||||
if app.Status.OperationState == nil {
|
||||
return true, time.Duration(0)
|
||||
return time.Duration(0)
|
||||
}
|
||||
|
||||
var timeSinceOperation *time.Duration
|
||||
@@ -2276,34 +2282,41 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application, alread
|
||||
timeSinceOperation = ptr.To(time.Since(app.Status.OperationState.FinishedAt.Time))
|
||||
}
|
||||
|
||||
// Reset counter if the prior sync was successful and the cooldown period is over OR if the revision has changed
|
||||
if !alreadyAttempted || (timeSinceOperation != nil && *timeSinceOperation >= ctrl.selfHealBackoffCooldown && app.Status.Sync.Status == appv1.SyncStatusCodeSynced) {
|
||||
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = 0
|
||||
}
|
||||
|
||||
var retryAfter time.Duration
|
||||
if ctrl.selfHealBackOff == nil {
|
||||
if ctrl.selfHealBackoff == nil {
|
||||
if timeSinceOperation == nil {
|
||||
retryAfter = ctrl.selfHealTimeout
|
||||
} else {
|
||||
retryAfter = ctrl.selfHealTimeout - *timeSinceOperation
|
||||
}
|
||||
} else {
|
||||
backOff := *ctrl.selfHealBackOff
|
||||
backOff.Steps = int(app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
|
||||
backOff := *ctrl.selfHealBackoff
|
||||
backOff.Steps = selfHealAttemptsCount
|
||||
var delay time.Duration
|
||||
steps := backOff.Steps
|
||||
for i := 0; i < steps; i++ {
|
||||
delay = backOff.Step()
|
||||
}
|
||||
|
||||
if timeSinceOperation == nil {
|
||||
retryAfter = delay
|
||||
} else {
|
||||
retryAfter = delay - *timeSinceOperation
|
||||
}
|
||||
}
|
||||
return retryAfter <= 0, retryAfter
|
||||
return retryAfter
|
||||
}
|
||||
|
||||
// selfHealBackoffCooldownElapsed returns true when the last successful sync has occurred since longer
|
||||
// than then self heal cooldown. This means that the application has been in sync for long enough to
|
||||
// reset the self healing backoff to its initial state
|
||||
func (ctrl *ApplicationController) selfHealBackoffCooldownElapsed(app *appv1.Application) bool {
|
||||
if app.Status.OperationState == nil || app.Status.OperationState.FinishedAt == nil {
|
||||
// Something is in progress, or about to be. In that case, selfHeal attempt should be zero anyway
|
||||
return true
|
||||
}
|
||||
|
||||
timeSinceLastOperation := time.Since(app.Status.OperationState.FinishedAt.Time)
|
||||
return timeSinceLastOperation >= ctrl.selfHealBackoffCooldown && app.Status.OperationState.Phase.Successful()
|
||||
}
|
||||
|
||||
// isAppNamespaceAllowed returns whether the application is allowed in the
|
||||
|
||||
@@ -95,10 +95,10 @@ func (m *MockKubectl) DeleteResource(ctx context.Context, config *rest.Config, g
|
||||
}
|
||||
|
||||
func newFakeController(data *fakeData, repoErr error) *ApplicationController {
|
||||
return newFakeControllerWithResync(data, time.Minute, repoErr)
|
||||
return newFakeControllerWithResync(data, time.Minute, repoErr, nil)
|
||||
}
|
||||
|
||||
func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration, repoErr error) *ApplicationController {
|
||||
func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration, repoErr, revisionPathsErr error) *ApplicationController {
|
||||
var clust corev1.Secret
|
||||
err := yaml.Unmarshal([]byte(fakeCluster), &clust)
|
||||
if err != nil {
|
||||
@@ -124,7 +124,11 @@ func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration,
|
||||
}
|
||||
}
|
||||
|
||||
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(data.updateRevisionForPathsResponse, nil)
|
||||
if revisionPathsErr != nil {
|
||||
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(nil, revisionPathsErr)
|
||||
} else {
|
||||
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(data.updateRevisionForPathsResponse, nil)
|
||||
}
|
||||
|
||||
mockRepoClientset := mockrepoclient.Clientset{RepoServerServiceClient: &mockRepoClient}
|
||||
|
||||
@@ -344,10 +348,13 @@ status:
|
||||
- cccccccccccccccccccccccccccccccccccccccc
|
||||
sources:
|
||||
- path: some/path
|
||||
helm:
|
||||
valueFiles:
|
||||
- $values_test/values.yaml
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps.git
|
||||
- path: some/other/path
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps-fake.git
|
||||
- path: some/other/path
|
||||
- ref: values_test
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps-fake-ref.git
|
||||
`
|
||||
|
||||
@@ -621,13 +628,13 @@ func TestAutoSyncEnabledSetToTrue(t *testing.T) {
|
||||
assert.False(t, app.Operation.Sync.Prune)
|
||||
}
|
||||
|
||||
func TestMultiSourceSelfHeal(t *testing.T) {
|
||||
func TestAutoSyncMultiSourceWithoutSelfHeal(t *testing.T) {
|
||||
// Simulate OutOfSync caused by object change in cluster
|
||||
// So our Sync Revisions and SyncStatus Revisions should deep equal
|
||||
t.Run("ClusterObjectChangeShouldNotTriggerAutoSync", func(t *testing.T) {
|
||||
app := newFakeMultiSourceApp()
|
||||
app.Spec.SyncPolicy.Automated.SelfHeal = false
|
||||
app.Status.Sync.Revisions = []string{"z", "x", "v"}
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
@@ -639,15 +646,14 @@ func TestMultiSourceSelfHeal(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
})
|
||||
|
||||
t.Run("NewRevisionChangeShouldTriggerAutoSync", func(t *testing.T) {
|
||||
app := newFakeMultiSourceApp()
|
||||
app.Spec.SyncPolicy.Automated.SelfHeal = false
|
||||
app.Status.Sync.Revisions = []string{"a", "b", "c"}
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revisions: []string{"z", "x", "v"},
|
||||
Revisions: []string{"a", "b", "c"},
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.Nil(t, cond)
|
||||
@@ -790,6 +796,30 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
assert.Nil(t, app.Operation)
|
||||
})
|
||||
|
||||
t.Run("PreviousSyncAttemptError", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Status.OperationState = &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
},
|
||||
Phase: synccommon.OperationError,
|
||||
SyncResult: &v1alpha1.SyncOperationResult{
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
Source: *app.Spec.Source.DeepCopy(),
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
})
|
||||
|
||||
t.Run("NeedsToPruneResourcesOnlyButAutomatedPruneDisabled", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
@@ -844,45 +874,78 @@ func TestAutoSyncIndicateError(t *testing.T) {
|
||||
|
||||
// TestAutoSyncParameterOverrides verifies we auto-sync if revision is same but parameter overrides are different
|
||||
func TestAutoSyncParameterOverrides(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "a",
|
||||
Value: "1",
|
||||
t.Run("Single source", func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "a",
|
||||
Value: "1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
}
|
||||
app.Status.OperationState = &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "a",
|
||||
Value: "2", // this value changed
|
||||
}
|
||||
app.Status.OperationState = &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "a",
|
||||
Value: "2", // this value changed
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Phase: synccommon.OperationFailed,
|
||||
SyncResult: &v1alpha1.SyncOperationResult{
|
||||
Phase: synccommon.OperationFailed,
|
||||
SyncResult: &v1alpha1.SyncOperationResult{
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
},
|
||||
}
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
},
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, app.Operation)
|
||||
}
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, app.Operation)
|
||||
})
|
||||
|
||||
t.Run("Multi sources", func(t *testing.T) {
|
||||
app := newFakeMultiSourceApp()
|
||||
app.Spec.Sources[0].Helm = &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "a",
|
||||
Value: "1",
|
||||
},
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
|
||||
app.Status.OperationState.SyncResult.Sources[0].Helm = &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "a",
|
||||
Value: "2", // this value changed
|
||||
},
|
||||
},
|
||||
}
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revisions: []string{"z", "x", "v"},
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, app.Operation)
|
||||
})
|
||||
}
|
||||
|
||||
// TestFinalizeAppDeletion verifies application deletion
|
||||
@@ -1310,6 +1373,9 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) {
|
||||
|
||||
managedDeploy := v1alpha1.ResourceNode{
|
||||
ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "nginx-deployment", Version: "v1"},
|
||||
Health: &v1alpha1.HealthStatus{
|
||||
Status: health.HealthStatusMissing,
|
||||
},
|
||||
}
|
||||
orphanedDeploy1 := v1alpha1.ResourceNode{
|
||||
ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deploy1"},
|
||||
@@ -1862,7 +1928,7 @@ apps/Deployment:
|
||||
hs = {}
|
||||
hs.status = ""
|
||||
hs.message = ""
|
||||
|
||||
|
||||
if obj.metadata ~= nil then
|
||||
if obj.metadata.labels ~= nil then
|
||||
current_status = obj.metadata.labels["status"]
|
||||
@@ -1898,7 +1964,7 @@ apps/Deployment:
|
||||
{},
|
||||
{},
|
||||
},
|
||||
}, time.Millisecond*10, nil)
|
||||
}, time.Millisecond*10, nil, nil)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
@@ -2030,7 +2096,9 @@ func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) {
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
|
||||
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
|
||||
assert.Equal(t, string(synccommon.OperationError), phase)
|
||||
assert.Equal(t, "Failed to load application project: error getting app project \"default\": appproject.argoproj.io \"default\" not found", message)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
|
||||
@@ -2059,8 +2127,8 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
|
||||
assert.Equal(t, string(synccommon.OperationFailed), phase)
|
||||
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
|
||||
assert.Equal(t, string(synccommon.OperationError), phase)
|
||||
assert.Contains(t, message, "application destination can't have both name and server defined: another-cluster https://localhost:6443")
|
||||
}
|
||||
|
||||
@@ -2084,20 +2152,24 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) {
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
|
||||
assert.Equal(t, string(synccommon.OperationRunning), phase)
|
||||
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
|
||||
assert.Contains(t, message, "Retrying attempt #1")
|
||||
retryCount, _, _ := unstructured.NestedFloat64(receivedPatch, "status", "operationState", "retryCount")
|
||||
assert.Equal(t, string(synccommon.OperationRunning), phase)
|
||||
assert.Contains(t, message, "Failed to load application project: error getting app project \"invalid-project\": appproject.argoproj.io \"invalid-project\" not found. Retrying attempt #1")
|
||||
assert.InEpsilon(t, float64(1), retryCount, 0.0001)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
|
||||
failedAttemptFinisedAt := time.Now().Add(-time.Minute * 5)
|
||||
app := newFakeApp()
|
||||
app.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
Retry: v1alpha1.RetryStrategy{Limit: 1},
|
||||
}
|
||||
app.Status.OperationState.Operation = *app.Operation
|
||||
app.Status.OperationState.Phase = synccommon.OperationRunning
|
||||
app.Status.OperationState.RetryCount = 1
|
||||
app.Status.OperationState.FinishedAt = &metav1.Time{Time: failedAttemptFinisedAt}
|
||||
app.Status.OperationState.SyncResult.Resources = []*v1alpha1.ResourceResult{{
|
||||
Name: "guestbook",
|
||||
Kind: "Deployment",
|
||||
@@ -2127,7 +2199,58 @@ func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
|
||||
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
|
||||
finishedAtStr, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "finishedAt")
|
||||
finishedAt, err := time.Parse(time.RFC3339, finishedAtStr)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
|
||||
assert.Equal(t, "successfully synced (no more tasks)", message)
|
||||
assert.Truef(t, finishedAt.After(failedAttemptFinisedAt), "finishedAt was expected to be updated. The retry was not performed.")
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_RunningPreviouslyFailedBackoff(t *testing.T) {
|
||||
failedAttemptFinisedAt := time.Now().Add(-time.Second)
|
||||
app := newFakeApp()
|
||||
app.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
Retry: v1alpha1.RetryStrategy{
|
||||
Limit: 1,
|
||||
Backoff: &v1alpha1.Backoff{
|
||||
Duration: "1h",
|
||||
Factor: ptr.To(int64(100)),
|
||||
MaxDuration: "1h",
|
||||
},
|
||||
},
|
||||
}
|
||||
app.Status.OperationState.Operation = *app.Operation
|
||||
app.Status.OperationState.Phase = synccommon.OperationRunning
|
||||
app.Status.OperationState.Message = "pending retry"
|
||||
app.Status.OperationState.RetryCount = 1
|
||||
app.Status.OperationState.FinishedAt = &metav1.Time{Time: failedAttemptFinisedAt}
|
||||
app.Status.OperationState.SyncResult.Resources = []*v1alpha1.ResourceResult{{
|
||||
Name: "guestbook",
|
||||
Kind: "Deployment",
|
||||
Group: "apps",
|
||||
Status: synccommon.ResultCodeSyncFailed,
|
||||
}}
|
||||
|
||||
data := &fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(data, nil)
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
fakeAppCs.PrependReactor("patch", "*", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
require.FailNow(t, "A patch should not have been called if the backoff has not passed")
|
||||
return true, &v1alpha1.Application{}, nil
|
||||
})
|
||||
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
|
||||
@@ -2136,6 +2259,7 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
Retry: v1alpha1.RetryStrategy{Limit: 10},
|
||||
}
|
||||
app.Status.OperationState.Operation = *app.Operation
|
||||
app.Status.OperationState.Phase = synccommon.OperationTerminating
|
||||
|
||||
data := &fakeData{
|
||||
@@ -2160,7 +2284,9 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
|
||||
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
|
||||
assert.Equal(t, string(synccommon.OperationFailed), phase)
|
||||
assert.Equal(t, "Operation terminated", message)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_Successful(t *testing.T) {
|
||||
@@ -2187,12 +2313,91 @@ func TestProcessRequestedAppOperation_Successful(t *testing.T) {
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
|
||||
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
|
||||
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
|
||||
assert.Equal(t, "successfully synced (no more tasks)", message)
|
||||
ok, level := ctrl.isRefreshRequested(ctrl.toAppKey(app.Name))
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, level)
|
||||
}
|
||||
|
||||
func TestProcessRequestedAppOperation_SyncTimeout(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
startedSince time.Duration
|
||||
syncTimeout time.Duration
|
||||
retryAttempt int
|
||||
currentPhase synccommon.OperationPhase
|
||||
expectedPhase synccommon.OperationPhase
|
||||
expectedMessage string
|
||||
}{{
|
||||
name: "Continue when running operation has not exceeded timeout",
|
||||
syncTimeout: time.Minute,
|
||||
startedSince: 30 * time.Second,
|
||||
currentPhase: synccommon.OperationRunning,
|
||||
expectedPhase: synccommon.OperationSucceeded,
|
||||
expectedMessage: "successfully synced (no more tasks)",
|
||||
}, {
|
||||
name: "Continue when terminating operation has exceeded timeout",
|
||||
syncTimeout: time.Minute,
|
||||
startedSince: 2 * time.Minute,
|
||||
currentPhase: synccommon.OperationTerminating,
|
||||
expectedPhase: synccommon.OperationFailed,
|
||||
expectedMessage: "Operation terminated",
|
||||
}, {
|
||||
name: "Terminate when running operation exceeded timeout",
|
||||
syncTimeout: time.Minute,
|
||||
startedSince: 2 * time.Minute,
|
||||
currentPhase: synccommon.OperationRunning,
|
||||
expectedPhase: synccommon.OperationFailed,
|
||||
expectedMessage: "Operation terminated, triggered by controller sync timeout",
|
||||
}, {
|
||||
name: "Terminate when retried operation exceeded timeout",
|
||||
syncTimeout: time.Minute,
|
||||
startedSince: 15 * time.Minute,
|
||||
currentPhase: synccommon.OperationRunning,
|
||||
retryAttempt: 1,
|
||||
expectedPhase: synccommon.OperationFailed,
|
||||
expectedMessage: "Operation terminated, triggered by controller sync timeout (retried 1 times).",
|
||||
}}
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(fmt.Sprintf("case %d: %s", i, tc.name), func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Project = "default"
|
||||
app.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Revision: "HEAD",
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponses: []*apiclient.ManifestResponse{{
|
||||
Manifests: []string{},
|
||||
}},
|
||||
}, nil)
|
||||
|
||||
ctrl.syncTimeout = tc.syncTimeout
|
||||
app.Status.OperationState = &v1alpha1.OperationState{
|
||||
Operation: *app.Operation,
|
||||
Phase: tc.currentPhase,
|
||||
StartedAt: metav1.NewTime(time.Now().Add(-tc.startedSince)),
|
||||
}
|
||||
if tc.retryAttempt > 0 {
|
||||
app.Status.OperationState.FinishedAt = ptr.To(metav1.NewTime(time.Now().Add(-tc.startedSince)))
|
||||
app.Status.OperationState.RetryCount = int64(tc.retryAttempt)
|
||||
}
|
||||
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.ObjectMeta.Namespace).Get(t.Context(), app.Name, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedPhase, app.Status.OperationState.Phase)
|
||||
assert.Equal(t, tc.expectedMessage, app.Status.OperationState.Message)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAppHosts(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
data := &fakeData{
|
||||
@@ -2459,35 +2664,71 @@ func TestAppStatusIsReplaced(t *testing.T) {
|
||||
|
||||
func TestAlreadyAttemptSync(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
defaultRevision := app.Status.OperationState.SyncResult.Revision
|
||||
|
||||
t.Run("no operation state", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState = nil
|
||||
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("no sync operation", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.Operation.Sync = nil
|
||||
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("no sync result", func(t *testing.T) {
|
||||
t.Run("no sync result for running sync", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult = nil
|
||||
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
|
||||
app.Status.OperationState.Phase = synccommon.OperationRunning
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("no sync result for completed sync", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult = nil
|
||||
app.Status.OperationState.Phase = synccommon.OperationError
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
|
||||
assert.True(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("single source", func(t *testing.T) {
|
||||
t.Run("same manifest with sync result", func(t *testing.T) {
|
||||
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, false)
|
||||
t.Run("no revision", func(t *testing.T) {
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("empty revision", func(t *testing.T) {
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{""}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("too many revision", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Revision = "sha"
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha", "sha2"}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("same manifest, same SHA with changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Revision = "sha"
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, true)
|
||||
assert.True(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("same manifest with sync result different targetRevision, same SHA", func(t *testing.T) {
|
||||
t.Run("same manifest, different SHA with changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Revision = "sha1"
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("same manifest, different SHA without changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Revision = "sha1"
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, false)
|
||||
assert.True(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest, same SHA with changes", func(t *testing.T) {
|
||||
// This test represents the case where the user changed a source's target revision to a new branch, but it
|
||||
// points to the same revision as the old branch. We currently do not consider this as having been "already
|
||||
// attempted." In the future we may want to short-circuit the auto-sync in these cases.
|
||||
@@ -2495,55 +2736,101 @@ func TestAlreadyAttemptSync(t *testing.T) {
|
||||
app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{TargetRevision: "branch1"}
|
||||
app.Spec.Source = &v1alpha1.ApplicationSource{TargetRevision: "branch2"}
|
||||
app.Status.OperationState.SyncResult.Revision = "sha"
|
||||
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, false)
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest with sync result, different SHA", func(t *testing.T) {
|
||||
t.Run("different manifest, different SHA with changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
|
||||
app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
|
||||
app.Status.OperationState.SyncResult.Revision = "sha1"
|
||||
attempted, _ := alreadyAttemptedSync(app, "sha2", []string{}, false, true)
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest with sync result, same SHA", func(t *testing.T) {
|
||||
t.Run("different manifest, different SHA without changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
|
||||
app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
|
||||
app.Status.OperationState.SyncResult.Revision = "sha1"
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, false)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest, same SHA without changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
|
||||
app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
|
||||
app.Status.OperationState.SyncResult.Revision = "sha"
|
||||
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, true)
|
||||
assert.True(t, attempted)
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, false)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("multi-source", func(t *testing.T) {
|
||||
t.Run("same manifest with sync result", func(t *testing.T) {
|
||||
attempted, _ := alreadyAttemptedSync(app, "", []string{"sha"}, true, false)
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
|
||||
app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
|
||||
|
||||
t.Run("same manifest, same SHAs with changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b"}, true)
|
||||
assert.True(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("same manifest with sync result, different targetRevision, same SHA", func(t *testing.T) {
|
||||
t.Run("same manifest, different SHAs with changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("same manifest, different SHA without changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, false)
|
||||
assert.True(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest, same SHA with changes", func(t *testing.T) {
|
||||
// This test represents the case where the user changed a source's target revision to a new branch, but it
|
||||
// points to the same revision as the old branch. We currently do not consider this as having been "already
|
||||
// attempted." In the future we may want to short-circuit the auto-sync in these cases.
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}}
|
||||
app.Spec.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch2"}}
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha"}
|
||||
attempted, _ := alreadyAttemptedSync(app, "", []string{"sha"}, true, false)
|
||||
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}, {TargetRevision: "branch2"}}
|
||||
app.Spec.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}, {TargetRevision: "branch3"}}
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_2", "sha_b_2"}
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, false)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest with sync result, different SHAs", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
|
||||
attempted, _ := alreadyAttemptedSync(app, "", []string{"sha_a_2", "sha_b_2"}, true, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest with sync result, same SHAs", func(t *testing.T) {
|
||||
t.Run("different manifest, different SHA with changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
|
||||
app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
|
||||
attempted, _ := alreadyAttemptedSync(app, "", []string{"sha_a", "sha_b"}, true, true)
|
||||
assert.True(t, attempted)
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b_2"}, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest, different SHA without changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
|
||||
app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b_2"}, false)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest, same SHA without changes", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
|
||||
app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
|
||||
app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
|
||||
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b"}, false)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -2555,14 +2842,13 @@ func assertDurationAround(t *testing.T, expected time.Duration, actual time.Dura
|
||||
assert.LessOrEqual(t, expected, actual+delta)
|
||||
}
|
||||
|
||||
func TestSelfHealExponentialBackoff(t *testing.T) {
|
||||
func TestSelfHealRemainingBackoff(t *testing.T) {
|
||||
ctrl := newFakeController(&fakeData{}, nil)
|
||||
ctrl.selfHealBackOff = &wait.Backoff{
|
||||
ctrl.selfHealBackoff = &wait.Backoff{
|
||||
Factor: 3,
|
||||
Duration: 2 * time.Second,
|
||||
Cap: 2 * time.Minute,
|
||||
}
|
||||
|
||||
app := &v1alpha1.Application{
|
||||
Status: v1alpha1.ApplicationStatus{
|
||||
OperationState: &v1alpha1.OperationState{
|
||||
@@ -2574,156 +2860,108 @@ func TestSelfHealExponentialBackoff(t *testing.T) {
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
attempts int64
|
||||
expectedAttempts int64
|
||||
attempts int
|
||||
finishedAt *metav1.Time
|
||||
expectedDuration time.Duration
|
||||
shouldSelfHeal bool
|
||||
alreadyAttempted bool
|
||||
syncStatus v1alpha1.SyncStatusCode
|
||||
}{{
|
||||
attempts: 0,
|
||||
finishedAt: ptr.To(metav1.Now()),
|
||||
expectedDuration: 0,
|
||||
shouldSelfHeal: true,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 0,
|
||||
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
}, {
|
||||
attempts: 1,
|
||||
finishedAt: ptr.To(metav1.Now()),
|
||||
expectedDuration: 2 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 1,
|
||||
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
}, {
|
||||
attempts: 2,
|
||||
finishedAt: ptr.To(metav1.Now()),
|
||||
expectedDuration: 6 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 2,
|
||||
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
}, {
|
||||
attempts: 3,
|
||||
finishedAt: nil,
|
||||
expectedDuration: 18 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 3,
|
||||
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
}, {
|
||||
attempts: 4,
|
||||
finishedAt: nil,
|
||||
expectedDuration: 54 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 4,
|
||||
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
}, {
|
||||
attempts: 5,
|
||||
finishedAt: nil,
|
||||
expectedDuration: 120 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 5,
|
||||
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
}, {
|
||||
attempts: 6,
|
||||
finishedAt: nil,
|
||||
expectedDuration: 120 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 6,
|
||||
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
}, {
|
||||
attempts: 6,
|
||||
finishedAt: nil,
|
||||
expectedDuration: 0,
|
||||
shouldSelfHeal: true,
|
||||
alreadyAttempted: false,
|
||||
expectedAttempts: 0,
|
||||
syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
}, { // backoff will not reset as finished tme isn't >= cooldown
|
||||
attempts: 6,
|
||||
finishedAt: ptr.To(metav1.Now()),
|
||||
expectedDuration: 120 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 6,
|
||||
syncStatus: v1alpha1.SyncStatusCodeSynced,
|
||||
}, { // backoff will reset as finished time is >= cooldown
|
||||
}, {
|
||||
attempts: 40,
|
||||
finishedAt: &metav1.Time{Time: time.Now().Add(-(1 * time.Minute))},
|
||||
expectedDuration: -60 * time.Second,
|
||||
shouldSelfHeal: true,
|
||||
alreadyAttempted: true,
|
||||
expectedAttempts: 0,
|
||||
syncStatus: v1alpha1.SyncStatusCodeSynced,
|
||||
finishedAt: &metav1.Time{Time: time.Now().Add(-1 * time.Minute)},
|
||||
expectedDuration: 60 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
}}
|
||||
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
|
||||
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = tc.attempts
|
||||
app.Status.OperationState.FinishedAt = tc.finishedAt
|
||||
app.Status.Sync.Status = tc.syncStatus
|
||||
ok, duration := ctrl.shouldSelfHeal(app, tc.alreadyAttempted)
|
||||
require.Equal(t, ok, tc.shouldSelfHeal)
|
||||
require.Equal(t, tc.expectedAttempts, app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
|
||||
duration := ctrl.selfHealRemainingBackoff(app, tc.attempts)
|
||||
shouldSelfHeal := duration <= 0
|
||||
require.Equal(t, tc.shouldSelfHeal, shouldSelfHeal)
|
||||
assertDurationAround(t, tc.expectedDuration, duration)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncTimeout(t *testing.T) {
|
||||
testCases := []struct {
|
||||
delta time.Duration
|
||||
expectedPhase synccommon.OperationPhase
|
||||
expectedMessage string
|
||||
}{{
|
||||
delta: 2 * time.Minute,
|
||||
expectedPhase: synccommon.OperationFailed,
|
||||
expectedMessage: "Operation terminated",
|
||||
}, {
|
||||
delta: 30 * time.Second,
|
||||
expectedPhase: synccommon.OperationSucceeded,
|
||||
expectedMessage: "successfully synced (no more tasks)",
|
||||
}}
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Project = "default"
|
||||
app.Operation = &v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Revision: "HEAD",
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponses: []*apiclient.ManifestResponse{{
|
||||
Manifests: []string{},
|
||||
}},
|
||||
}, nil)
|
||||
func TestSelfHealBackoffCooldownElapsed(t *testing.T) {
|
||||
cooldown := time.Second * 30
|
||||
ctrl := newFakeController(&fakeData{}, nil)
|
||||
ctrl.selfHealBackoffCooldown = cooldown
|
||||
|
||||
ctrl.syncTimeout = time.Minute
|
||||
app.Status.OperationState = &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Revision: "HEAD",
|
||||
},
|
||||
},
|
||||
Phase: synccommon.OperationRunning,
|
||||
StartedAt: metav1.NewTime(time.Now().Add(-tc.delta)),
|
||||
}
|
||||
ctrl.processRequestedAppOperation(app)
|
||||
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.ObjectMeta.Namespace).Get(t.Context(), app.Name, metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedPhase, app.Status.OperationState.Phase)
|
||||
require.Equal(t, tc.expectedMessage, app.Status.OperationState.Message)
|
||||
})
|
||||
app := &v1alpha1.Application{
|
||||
Status: v1alpha1.ApplicationStatus{
|
||||
OperationState: &v1alpha1.OperationState{
|
||||
Phase: synccommon.OperationSucceeded,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("operation not completed", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.FinishedAt = nil
|
||||
elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
|
||||
assert.True(t, elapsed)
|
||||
})
|
||||
|
||||
t.Run("successful operation finised after cooldown", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now().Add(-cooldown)}
|
||||
elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
|
||||
assert.True(t, elapsed)
|
||||
})
|
||||
|
||||
t.Run("unsuccessful operation finised after cooldown", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.Phase = synccommon.OperationFailed
|
||||
app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now().Add(-cooldown)}
|
||||
elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
|
||||
assert.False(t, elapsed)
|
||||
})
|
||||
|
||||
t.Run("successful operation finised before cooldown", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now()}
|
||||
elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
|
||||
assert.False(t, elapsed)
|
||||
})
|
||||
}
|
||||
|
||||
13
controller/cache/cache.go
vendored
13
controller/cache/cache.go
vendored
@@ -137,8 +137,6 @@ type LiveStateCache interface {
|
||||
IsNamespaced(server *appv1.Cluster, gk schema.GroupKind) (bool, error)
|
||||
// Returns synced cluster cache
|
||||
GetClusterCache(server *appv1.Cluster) (clustercache.ClusterCache, error)
|
||||
// Executes give callback against resource specified by the key and all its children
|
||||
IterateHierarchy(server *appv1.Cluster, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
|
||||
// Executes give callback against resources specified by the keys and all its children
|
||||
IterateHierarchyV2(server *appv1.Cluster, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
|
||||
// Returns state of live nodes which correspond for target nodes of specified application.
|
||||
@@ -669,17 +667,6 @@ func (c *liveStateCache) IsNamespaced(server *appv1.Cluster, gk schema.GroupKind
|
||||
return clusterInfo.IsNamespaced(gk)
|
||||
}
|
||||
|
||||
func (c *liveStateCache) IterateHierarchy(server *appv1.Cluster, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error {
|
||||
clusterInfo, err := c.getSyncedCluster(server)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterInfo.IterateHierarchy(key, func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) bool {
|
||||
return action(asResourceNode(resource), getApp(resource, namespaceResources))
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *liveStateCache) IterateHierarchyV2(server *appv1.Cluster, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error {
|
||||
clusterInfo, err := c.getSyncedCluster(server)
|
||||
if err != nil {
|
||||
|
||||
100
controller/cache/cache_test.go
vendored
100
controller/cache/cache_test.go
vendored
@@ -1,6 +1,7 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
"net/url"
|
||||
@@ -39,6 +40,36 @@ func (n netError) Error() string { return string(n) }
|
||||
func (n netError) Timeout() bool { return false }
|
||||
func (n netError) Temporary() bool { return false }
|
||||
|
||||
func fixtures(data map[string]string, opts ...func(secret *corev1.Secret)) (*fake.Clientset, *argosettings.SettingsManager) {
|
||||
cm := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDConfigMapName,
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/part-of": "argocd",
|
||||
},
|
||||
},
|
||||
Data: data,
|
||||
}
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDSecretName,
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/part-of": "argocd",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{},
|
||||
}
|
||||
for i := range opts {
|
||||
opts[i](secret)
|
||||
}
|
||||
kubeClient := fake.NewClientset(cm, secret)
|
||||
settingsManager := argosettings.NewSettingsManager(context.Background(), kubeClient, "default")
|
||||
|
||||
return kubeClient, settingsManager
|
||||
}
|
||||
|
||||
func TestHandleModEvent_HasChanges(_ *testing.T) {
|
||||
clusterCache := &mocks.ClusterCache{}
|
||||
clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
|
||||
@@ -745,3 +776,72 @@ func Test_GetVersionsInfo_error_redacted(t *testing.T) {
|
||||
require.Error(t, err)
|
||||
assert.NotContains(t, err.Error(), "password")
|
||||
}
|
||||
|
||||
func TestLoadCacheSettings(t *testing.T) {
|
||||
_, settingsManager := fixtures(map[string]string{
|
||||
"application.instanceLabelKey": "testLabel",
|
||||
"application.resourceTrackingMethod": string(appv1.TrackingMethodLabel),
|
||||
"installationID": "123456789",
|
||||
})
|
||||
ch := liveStateCache{
|
||||
settingsMgr: settingsManager,
|
||||
}
|
||||
label, err := settingsManager.GetAppInstanceLabelKey()
|
||||
require.NoError(t, err)
|
||||
trackingMethod, err := settingsManager.GetTrackingMethod()
|
||||
require.NoError(t, err)
|
||||
res, err := ch.loadCacheSettings()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, label, res.appInstanceLabelKey)
|
||||
assert.Equal(t, string(appv1.TrackingMethodLabel), trackingMethod)
|
||||
assert.Equal(t, "123456789", res.installationID)
|
||||
|
||||
// By default the values won't be nil
|
||||
assert.NotNil(t, res.resourceOverrides)
|
||||
assert.NotNil(t, res.clusterSettings)
|
||||
assert.True(t, res.ignoreResourceUpdatesEnabled)
|
||||
}
|
||||
|
||||
func Test_ownerRefGV(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input metav1.OwnerReference
|
||||
expected schema.GroupVersion
|
||||
}{
|
||||
{
|
||||
name: "valid API Version",
|
||||
input: metav1.OwnerReference{
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
expected: schema.GroupVersion{
|
||||
Group: "apps",
|
||||
Version: "v1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "custom defined version",
|
||||
input: metav1.OwnerReference{
|
||||
APIVersion: "custom-version",
|
||||
},
|
||||
expected: schema.GroupVersion{
|
||||
Version: "custom-version",
|
||||
Group: "",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty APIVersion",
|
||||
input: metav1.OwnerReference{
|
||||
APIVersion: "",
|
||||
},
|
||||
expected: schema.GroupVersion{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
res := ownerRefGV(tt.input)
|
||||
assert.Equal(t, tt.expected, res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
12
controller/cache/info.go
vendored
12
controller/cache/info.go
vendored
@@ -446,6 +446,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
}
|
||||
|
||||
req, _ := resourcehelper.PodRequestsAndLimits(&pod)
|
||||
|
||||
res.PodInfo = &PodInfo{NodeName: pod.Spec.NodeName, ResourceRequests: req, Phase: pod.Status.Phase}
|
||||
|
||||
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Node", Value: pod.Spec.NodeName})
|
||||
@@ -454,6 +455,17 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Restart Count", Value: strconv.Itoa(restarts)})
|
||||
}
|
||||
|
||||
// Requests are relevant even for pods in the init phase or pending state (e.g., due to insufficient resources),
|
||||
// as they help with diagnosing scheduling and startup issues.
|
||||
// requests will be released for terminated pods either with success or failed state termination.
|
||||
if !isPodPhaseTerminal(pod.Status.Phase) {
|
||||
CPUReq := req[corev1.ResourceCPU]
|
||||
MemoryReq := req[corev1.ResourceMemory]
|
||||
|
||||
res.Info = append(res.Info, v1alpha1.InfoItem{Name: common.PodRequestsCPU, Value: strconv.FormatInt(CPUReq.MilliValue(), 10)})
|
||||
res.Info = append(res.Info, v1alpha1.InfoItem{Name: common.PodRequestsMEM, Value: strconv.FormatInt(MemoryReq.MilliValue(), 10)})
|
||||
}
|
||||
|
||||
var urls []string
|
||||
if res.NetworkingInfo != nil {
|
||||
urls = res.NetworkingInfo.ExternalURLs
|
||||
|
||||
172
controller/cache/info_test.go
vendored
172
controller/cache/info_test.go
vendored
@@ -12,6 +12,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
|
||||
"github.com/argoproj/argo-cd/v3/util/errors"
|
||||
@@ -304,6 +305,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"}, // strings imported from common
|
||||
{Name: common.PodRequestsMEM, Value: "134217728000"},
|
||||
}, info.Info)
|
||||
assert.Equal(t, []string{"bar"}, info.Images)
|
||||
assert.Equal(t, &PodInfo{
|
||||
@@ -365,9 +368,81 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Status Reason", Value: "Running"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "1/1"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
t.Run("TestGetPodWithInitialContainerInfoWithResources", func(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: "v1"
|
||||
kind: "Pod"
|
||||
metadata:
|
||||
labels:
|
||||
app: "app-with-initial-container"
|
||||
name: "app-with-initial-container-5f46976fdb-vd6rv"
|
||||
namespace: "default"
|
||||
ownerReferences:
|
||||
- apiVersion: "apps/v1"
|
||||
kind: "ReplicaSet"
|
||||
name: "app-with-initial-container-5f46976fdb"
|
||||
spec:
|
||||
containers:
|
||||
- image: "alpine:latest"
|
||||
imagePullPolicy: "Always"
|
||||
name: "app-with-initial-container"
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "128Mi"
|
||||
limits:
|
||||
cpu: "500m"
|
||||
memory: "512Mi"
|
||||
initContainers:
|
||||
- image: "alpine:latest"
|
||||
imagePullPolicy: "Always"
|
||||
name: "app-with-initial-container-logshipper"
|
||||
resources:
|
||||
requests:
|
||||
cpu: "50m"
|
||||
memory: "64Mi"
|
||||
limits:
|
||||
cpu: "250m"
|
||||
memory: "256Mi"
|
||||
nodeName: "minikube"
|
||||
status:
|
||||
containerStatuses:
|
||||
- image: "alpine:latest"
|
||||
name: "app-with-initial-container"
|
||||
ready: true
|
||||
restartCount: 0
|
||||
started: true
|
||||
state:
|
||||
running:
|
||||
startedAt: "2024-10-08T08:44:25Z"
|
||||
initContainerStatuses:
|
||||
- image: "alpine:latest"
|
||||
name: "app-with-initial-container-logshipper"
|
||||
ready: true
|
||||
restartCount: 0
|
||||
started: false
|
||||
state:
|
||||
terminated:
|
||||
exitCode: 0
|
||||
reason: "Completed"
|
||||
phase: "Running"
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Running"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "1/1"},
|
||||
{Name: common.PodRequestsCPU, Value: "100"},
|
||||
{Name: common.PodRequestsMEM, Value: "134217728000"},
|
||||
}, info.Info)
|
||||
})
|
||||
t.Run("TestGetPodInfoWithSidecar", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -422,6 +497,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Status Reason", Value: "Running"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "2/2"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
@@ -480,6 +557,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Status Reason", Value: "Init:0/1"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
@@ -537,6 +616,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/3"},
|
||||
{Name: "Restart Count", Value: "3"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
@@ -594,6 +675,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/3"},
|
||||
{Name: "Restart Count", Value: "3"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
@@ -654,6 +737,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "1/3"},
|
||||
{Name: "Restart Count", Value: "7"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
@@ -696,6 +781,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
{Name: "Restart Count", Value: "3"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
@@ -731,6 +818,45 @@ func TestGetPodInfo(t *testing.T) {
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
// Test pod condition succeed which had some allocated resources
|
||||
t.Run("TestPodConditionSucceededWithResources", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test8
|
||||
spec:
|
||||
nodeName: minikube
|
||||
containers:
|
||||
- name: container
|
||||
resources:
|
||||
requests:
|
||||
cpu: "50m"
|
||||
memory: "64Mi"
|
||||
limits:
|
||||
cpu: "250m"
|
||||
memory: "256Mi"
|
||||
status:
|
||||
phase: Succeeded
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
terminated:
|
||||
reason: Completed
|
||||
exitCode: 0
|
||||
`)
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Completed"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
// Test pod condition failed
|
||||
t.Run("TestPodConditionFailed", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -763,6 +889,46 @@ func TestGetPodInfo(t *testing.T) {
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
// Test pod condition failed with allocated resources
|
||||
|
||||
t.Run("TestPodConditionFailedWithResources", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test9
|
||||
spec:
|
||||
nodeName: minikube
|
||||
containers:
|
||||
- name: container
|
||||
resources:
|
||||
requests:
|
||||
cpu: "50m"
|
||||
memory: "64Mi"
|
||||
limits:
|
||||
cpu: "250m"
|
||||
memory: "256Mi"
|
||||
status:
|
||||
phase: Failed
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
terminated:
|
||||
reason: Error
|
||||
exitCode: 1
|
||||
`)
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Error"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
// Test pod condition succeed with deletion
|
||||
t.Run("TestPodConditionSucceededWithDeletion", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -824,6 +990,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Status Reason", Value: "Terminating"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
@@ -850,6 +1018,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Status Reason", Value: "Terminating"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
|
||||
@@ -880,6 +1050,8 @@ func TestGetPodInfo(t *testing.T) {
|
||||
{Name: "Status Reason", Value: "SchedulingGated"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/2"},
|
||||
{Name: common.PodRequestsCPU, Value: "0"},
|
||||
{Name: common.PodRequestsMEM, Value: "0"},
|
||||
}, info.Info)
|
||||
})
|
||||
}
|
||||
|
||||
63
controller/cache/mocks/LiveStateCache.go
generated
vendored
63
controller/cache/mocks/LiveStateCache.go
generated
vendored
@@ -471,69 +471,6 @@ func (_c *LiveStateCache_IsNamespaced_Call) RunAndReturn(run func(server *v1alph
|
||||
return _c
|
||||
}
|
||||
|
||||
// IterateHierarchy provides a mock function for the type LiveStateCache
|
||||
func (_mock *LiveStateCache) IterateHierarchy(server *v1alpha1.Cluster, key kube.ResourceKey, action func(child v1alpha1.ResourceNode, appName string) bool) error {
|
||||
ret := _mock.Called(server, key, action)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for IterateHierarchy")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(*v1alpha1.Cluster, kube.ResourceKey, func(child v1alpha1.ResourceNode, appName string) bool) error); ok {
|
||||
r0 = returnFunc(server, key, action)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// LiveStateCache_IterateHierarchy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IterateHierarchy'
|
||||
type LiveStateCache_IterateHierarchy_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// IterateHierarchy is a helper method to define mock.On call
|
||||
// - server *v1alpha1.Cluster
|
||||
// - key kube.ResourceKey
|
||||
// - action func(child v1alpha1.ResourceNode, appName string) bool
|
||||
func (_e *LiveStateCache_Expecter) IterateHierarchy(server interface{}, key interface{}, action interface{}) *LiveStateCache_IterateHierarchy_Call {
|
||||
return &LiveStateCache_IterateHierarchy_Call{Call: _e.mock.On("IterateHierarchy", server, key, action)}
|
||||
}
|
||||
|
||||
func (_c *LiveStateCache_IterateHierarchy_Call) Run(run func(server *v1alpha1.Cluster, key kube.ResourceKey, action func(child v1alpha1.ResourceNode, appName string) bool)) *LiveStateCache_IterateHierarchy_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 *v1alpha1.Cluster
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(*v1alpha1.Cluster)
|
||||
}
|
||||
var arg1 kube.ResourceKey
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(kube.ResourceKey)
|
||||
}
|
||||
var arg2 func(child v1alpha1.ResourceNode, appName string) bool
|
||||
if args[2] != nil {
|
||||
arg2 = args[2].(func(child v1alpha1.ResourceNode, appName string) bool)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
arg2,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *LiveStateCache_IterateHierarchy_Call) Return(err error) *LiveStateCache_IterateHierarchy_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *LiveStateCache_IterateHierarchy_Call) RunAndReturn(run func(server *v1alpha1.Cluster, key kube.ResourceKey, action func(child v1alpha1.ResourceNode, appName string) bool) error) *LiveStateCache_IterateHierarchy_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// IterateHierarchyV2 provides a mock function for the type LiveStateCache
|
||||
func (_mock *LiveStateCache) IterateHierarchyV2(server *v1alpha1.Cluster, keys []kube.ResourceKey, action func(child v1alpha1.ResourceNode, appName string) bool) error {
|
||||
ret := _mock.Called(server, keys, action)
|
||||
|
||||
@@ -27,10 +27,9 @@ func setApplicationHealth(resources []managedResource, statuses []appv1.Resource
|
||||
if res.Target != nil && hookutil.Skip(res.Target) {
|
||||
continue
|
||||
}
|
||||
if res.Target != nil && res.Target.GetAnnotations() != nil && res.Target.GetAnnotations()[common.AnnotationIgnoreHealthCheck] == "true" {
|
||||
if res.Live != nil && res.Live.GetAnnotations() != nil && res.Live.GetAnnotations()[common.AnnotationIgnoreHealthCheck] == "true" {
|
||||
continue
|
||||
}
|
||||
|
||||
if res.Live != nil && (hookutil.IsHook(res.Live) || ignore.Ignore(res.Live)) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@ func TestSetApplicationHealth(t *testing.T) {
|
||||
// The app is considered healthy
|
||||
failedJob.SetAnnotations(nil)
|
||||
failedJobIgnoreHealthcheck := resourceFromFile("./testdata/job-failed-ignore-healthcheck.yaml")
|
||||
resources[1].Target = &failedJobIgnoreHealthcheck
|
||||
resources[1].Live = &failedJobIgnoreHealthcheck
|
||||
healthStatus, err = setApplicationHealth(resources, resourceStatuses, nil, app, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, health.HealthStatusHealthy, healthStatus)
|
||||
|
||||
@@ -51,7 +51,7 @@ func (ctrl *ApplicationController) executePostDeleteHooks(app *v1alpha1.Applicat
|
||||
revisions = append(revisions, src.TargetRevision)
|
||||
}
|
||||
|
||||
targets, _, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false, true)
|
||||
targets, _, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -11,12 +11,16 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
commitclient "github.com/argoproj/argo-cd/v3/commitserver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v3/controller/hydrator/types"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
applog "github.com/argoproj/argo-cd/v3/util/app/log"
|
||||
"github.com/argoproj/argo-cd/v3/util/git"
|
||||
utilio "github.com/argoproj/argo-cd/v3/util/io"
|
||||
)
|
||||
|
||||
// RepoGetter is an interface that defines methods for getting repository objects. It's a subset of the DB interface to
|
||||
// avoid granting access to things we don't need.
|
||||
type RepoGetter interface {
|
||||
// GetRepository returns a repository by its URL and project name.
|
||||
GetRepository(ctx context.Context, repoURL, project string) (*appv1.Repository, error)
|
||||
@@ -28,16 +32,37 @@ type RepoGetter interface {
|
||||
type Dependencies interface {
|
||||
// TODO: determine if we actually need to get the app, or if all the stuff we need the app for is done already on
|
||||
// the app controller side.
|
||||
|
||||
// GetProcessableAppProj returns the AppProject for the given application. It should only return projects that are
|
||||
// processable by the controller, meaning that the project is not deleted and the application is in a namespace
|
||||
// permitted by the project.
|
||||
GetProcessableAppProj(app *appv1.Application) (*appv1.AppProject, error)
|
||||
|
||||
// GetProcessableApps returns a list of applications that are processable by the controller.
|
||||
GetProcessableApps() (*appv1.ApplicationList, error)
|
||||
|
||||
// GetRepoObjs returns the repository objects for the given application, source, and revision. It calls the repo-
|
||||
// server and gets the manifests (objects).
|
||||
GetRepoObjs(app *appv1.Application, source appv1.ApplicationSource, revision string, project *appv1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error)
|
||||
|
||||
// GetWriteCredentials returns the repository credentials for the given repository URL and project. These are to be
|
||||
// sent to the commit server to write the hydrated manifests.
|
||||
GetWriteCredentials(ctx context.Context, repoURL string, project string) (*appv1.Repository, error)
|
||||
|
||||
// RequestAppRefresh requests a refresh of the application with the given name and namespace. This is used to
|
||||
// trigger a refresh after the application has been hydrated and a new commit has been pushed.
|
||||
RequestAppRefresh(appName string, appNamespace string) error
|
||||
// TODO: only allow access to the hydrator status
|
||||
|
||||
// PersistAppHydratorStatus persists the application status for the source hydrator.
|
||||
PersistAppHydratorStatus(orig *appv1.Application, newStatus *appv1.SourceHydratorStatus)
|
||||
AddHydrationQueueItem(key HydrationQueueKey)
|
||||
|
||||
// AddHydrationQueueItem adds a hydration queue item to the queue. This is used to trigger the hydration process for
|
||||
// a group of applications which are hydrating to the same repo and target branch.
|
||||
AddHydrationQueueItem(key types.HydrationQueueKey)
|
||||
}
|
||||
|
||||
// Hydrator is the main struct that implements the hydration logic. It uses the Dependencies interface to access the
|
||||
// app controller's functionality without directly depending on it.
|
||||
type Hydrator struct {
|
||||
dependencies Dependencies
|
||||
statusRefreshTimeout time.Duration
|
||||
@@ -46,6 +71,9 @@ type Hydrator struct {
|
||||
repoGetter RepoGetter
|
||||
}
|
||||
|
||||
// NewHydrator creates a new Hydrator instance with the given dependencies, status refresh timeout, commit clientset,
|
||||
// repo clientset, and repo getter. The refresh timeout determines how often the hydrator checks if an application
|
||||
// needs to be hydrated.
|
||||
func NewHydrator(dependencies Dependencies, statusRefreshTimeout time.Duration, commitClientset commitclient.Clientset, repoClientset apiclient.Clientset, repoGetter RepoGetter) *Hydrator {
|
||||
return &Hydrator{
|
||||
dependencies: dependencies,
|
||||
@@ -56,6 +84,12 @@ func NewHydrator(dependencies Dependencies, statusRefreshTimeout time.Duration,
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessAppHydrateQueueItem processes an application hydrate queue item. It checks if the application needs hydration
|
||||
// and if so, it updates the application's status to indicate that hydration is in progress. It then adds the
|
||||
// hydration queue item to the queue for further processing.
|
||||
//
|
||||
// It's likely that multiple applications will trigger hydration at the same time. The hydration queue key is meant to
|
||||
// dedupe these requests.
|
||||
func (h *Hydrator) ProcessAppHydrateQueueItem(origApp *appv1.Application) {
|
||||
origApp = origApp.DeepCopy()
|
||||
app := origApp.DeepCopy()
|
||||
@@ -89,38 +123,24 @@ func (h *Hydrator) ProcessAppHydrateQueueItem(origApp *appv1.Application) {
|
||||
logCtx.Debug("Successfully processed app hydrate queue item")
|
||||
}
|
||||
|
||||
func getHydrationQueueKey(app *appv1.Application) HydrationQueueKey {
|
||||
func getHydrationQueueKey(app *appv1.Application) types.HydrationQueueKey {
|
||||
destinationBranch := app.Spec.SourceHydrator.SyncSource.TargetBranch
|
||||
if app.Spec.SourceHydrator.HydrateTo != nil {
|
||||
destinationBranch = app.Spec.SourceHydrator.HydrateTo.TargetBranch
|
||||
}
|
||||
key := HydrationQueueKey{
|
||||
SourceRepoURL: app.Spec.SourceHydrator.DrySource.RepoURL,
|
||||
key := types.HydrationQueueKey{
|
||||
SourceRepoURL: git.NormalizeGitURLAllowInvalid(app.Spec.SourceHydrator.DrySource.RepoURL),
|
||||
SourceTargetRevision: app.Spec.SourceHydrator.DrySource.TargetRevision,
|
||||
DestinationBranch: destinationBranch,
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
type HydrationQueueKey struct {
|
||||
SourceRepoURL string
|
||||
SourceTargetRevision string
|
||||
DestinationBranch string
|
||||
}
|
||||
|
||||
// uniqueHydrationDestination is used to detect duplicate hydrate destinations.
|
||||
type uniqueHydrationDestination struct {
|
||||
//nolint:unused // used as part of a map key
|
||||
sourceRepoURL string
|
||||
//nolint:unused // used as part of a map key
|
||||
sourceTargetRevision string
|
||||
//nolint:unused // used as part of a map key
|
||||
destinationBranch string
|
||||
//nolint:unused // used as part of a map key
|
||||
destinationPath string
|
||||
}
|
||||
|
||||
func (h *Hydrator) ProcessHydrationQueueItem(hydrationKey HydrationQueueKey) (processNext bool) {
|
||||
// ProcessHydrationQueueItem processes a hydration queue item. It retrieves the relevant applications for the given
|
||||
// hydration key, hydrates their latest commit, and updates their status accordingly. If the hydration fails, it marks
|
||||
// the operation as failed and logs the error. If successful, it updates the operation to indicate that hydration was
|
||||
// successful and requests a refresh of the applications to pick up the new hydrated commit.
|
||||
func (h *Hydrator) ProcessHydrationQueueItem(hydrationKey types.HydrationQueueKey) (processNext bool) {
|
||||
logCtx := log.WithFields(log.Fields{
|
||||
"sourceRepoURL": hydrationKey.SourceRepoURL,
|
||||
"sourceTargetRevision": hydrationKey.SourceTargetRevision,
|
||||
@@ -177,7 +197,7 @@ func (h *Hydrator) ProcessHydrationQueueItem(hydrationKey HydrationQueueKey) (pr
|
||||
return
|
||||
}
|
||||
|
||||
func (h *Hydrator) hydrateAppsLatestCommit(logCtx *log.Entry, hydrationKey HydrationQueueKey) ([]*appv1.Application, string, string, error) {
|
||||
func (h *Hydrator) hydrateAppsLatestCommit(logCtx *log.Entry, hydrationKey types.HydrationQueueKey) ([]*appv1.Application, string, string, error) {
|
||||
relevantApps, err := h.getRelevantAppsForHydration(logCtx, hydrationKey)
|
||||
if err != nil {
|
||||
return nil, "", "", fmt.Errorf("failed to get relevant apps for hydration: %w", err)
|
||||
@@ -191,7 +211,7 @@ func (h *Hydrator) hydrateAppsLatestCommit(logCtx *log.Entry, hydrationKey Hydra
|
||||
return relevantApps, dryRevision, hydratedRevision, nil
|
||||
}
|
||||
|
||||
func (h *Hydrator) getRelevantAppsForHydration(logCtx *log.Entry, hydrationKey HydrationQueueKey) ([]*appv1.Application, error) {
|
||||
func (h *Hydrator) getRelevantAppsForHydration(logCtx *log.Entry, hydrationKey types.HydrationQueueKey) ([]*appv1.Application, error) {
|
||||
// Get all apps
|
||||
apps, err := h.dependencies.GetProcessableApps()
|
||||
if err != nil {
|
||||
@@ -199,13 +219,13 @@ func (h *Hydrator) getRelevantAppsForHydration(logCtx *log.Entry, hydrationKey H
|
||||
}
|
||||
|
||||
var relevantApps []*appv1.Application
|
||||
uniqueDestinations := make(map[uniqueHydrationDestination]bool, len(apps.Items))
|
||||
uniquePaths := make(map[string]bool, len(apps.Items))
|
||||
for _, app := range apps.Items {
|
||||
if app.Spec.SourceHydrator == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if app.Spec.SourceHydrator.DrySource.RepoURL != hydrationKey.SourceRepoURL ||
|
||||
if !git.SameURL(app.Spec.SourceHydrator.DrySource.RepoURL, hydrationKey.SourceRepoURL) ||
|
||||
app.Spec.SourceHydrator.DrySource.TargetRevision != hydrationKey.SourceTargetRevision {
|
||||
continue
|
||||
}
|
||||
@@ -229,17 +249,12 @@ func (h *Hydrator) getRelevantAppsForHydration(logCtx *log.Entry, hydrationKey H
|
||||
continue
|
||||
}
|
||||
|
||||
uniqueDestinationKey := uniqueHydrationDestination{
|
||||
sourceRepoURL: app.Spec.SourceHydrator.DrySource.RepoURL,
|
||||
sourceTargetRevision: app.Spec.SourceHydrator.DrySource.TargetRevision,
|
||||
destinationBranch: destinationBranch,
|
||||
destinationPath: app.Spec.SourceHydrator.SyncSource.Path,
|
||||
}
|
||||
// TODO: test the dupe detection
|
||||
if _, ok := uniqueDestinations[uniqueDestinationKey]; ok {
|
||||
return nil, fmt.Errorf("multiple app hydrators use the same destination: %v", uniqueDestinationKey)
|
||||
// TODO: normalize the path to avoid "path/.." from being treated as different from "."
|
||||
if _, ok := uniquePaths[app.Spec.SourceHydrator.SyncSource.Path]; ok {
|
||||
return nil, fmt.Errorf("multiple app hydrators use the same destination: %v", app.Spec.SourceHydrator.SyncSource.Path)
|
||||
}
|
||||
uniqueDestinations[uniqueDestinationKey] = true
|
||||
uniquePaths[app.Spec.SourceHydrator.SyncSource.Path] = true
|
||||
|
||||
relevantApps = append(relevantApps, &app)
|
||||
}
|
||||
|
||||
@@ -4,9 +4,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/controller/hydrator/mocks"
|
||||
"github.com/argoproj/argo-cd/v3/controller/hydrator/types"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -101,3 +106,64 @@ func Test_appNeedsHydration(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getRelevantAppsForHydration_RepoURLNormalization(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
d := mocks.NewDependencies(t)
|
||||
d.On("GetProcessableApps").Return(&v1alpha1.ApplicationList{
|
||||
Items: []v1alpha1.Application{
|
||||
{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
SourceHydrator: &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
RepoURL: "https://example.com/repo.git",
|
||||
TargetRevision: "main",
|
||||
Path: "app1",
|
||||
},
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
TargetBranch: "main",
|
||||
Path: "app1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
SourceHydrator: &v1alpha1.SourceHydrator{
|
||||
DrySource: v1alpha1.DrySource{
|
||||
RepoURL: "https://example.com/repo",
|
||||
TargetRevision: "main",
|
||||
Path: "app2",
|
||||
},
|
||||
SyncSource: v1alpha1.SyncSource{
|
||||
TargetBranch: "main",
|
||||
Path: "app2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
d.On("GetProcessableAppProj", mock.Anything).Return(&v1alpha1.AppProject{
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
SourceRepos: []string{"https://example.com/*"},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
hydrator := &Hydrator{dependencies: d}
|
||||
|
||||
hydrationKey := types.HydrationQueueKey{
|
||||
SourceRepoURL: "https://example.com/repo",
|
||||
SourceTargetRevision: "main",
|
||||
DestinationBranch: "main",
|
||||
}
|
||||
|
||||
logCtx := log.WithField("test", "RepoURLNormalization")
|
||||
relevantApps, err := hydrator.getRelevantAppsForHydration(logCtx, hydrationKey)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, relevantApps, 2, "Expected both apps to be considered relevant despite URL differences")
|
||||
}
|
||||
|
||||
464
controller/hydrator/mocks/Dependencies.go
generated
Normal file
464
controller/hydrator/mocks/Dependencies.go
generated
Normal file
@@ -0,0 +1,464 @@
|
||||
// Code generated by mockery; DO NOT EDIT.
|
||||
// github.com/vektra/mockery
|
||||
// template: testify
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/controller/hydrator/types"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
)
|
||||
|
||||
// NewDependencies creates a new instance of Dependencies. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewDependencies(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *Dependencies {
|
||||
mock := &Dependencies{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
// Dependencies is an autogenerated mock type for the Dependencies type
|
||||
type Dependencies struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type Dependencies_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *Dependencies) EXPECT() *Dependencies_Expecter {
|
||||
return &Dependencies_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// AddHydrationQueueItem provides a mock function for the type Dependencies
|
||||
func (_mock *Dependencies) AddHydrationQueueItem(key types.HydrationQueueKey) {
|
||||
_mock.Called(key)
|
||||
return
|
||||
}
|
||||
|
||||
// Dependencies_AddHydrationQueueItem_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddHydrationQueueItem'
|
||||
type Dependencies_AddHydrationQueueItem_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// AddHydrationQueueItem is a helper method to define mock.On call
|
||||
// - key types.HydrationQueueKey
|
||||
func (_e *Dependencies_Expecter) AddHydrationQueueItem(key interface{}) *Dependencies_AddHydrationQueueItem_Call {
|
||||
return &Dependencies_AddHydrationQueueItem_Call{Call: _e.mock.On("AddHydrationQueueItem", key)}
|
||||
}
|
||||
|
||||
func (_c *Dependencies_AddHydrationQueueItem_Call) Run(run func(key types.HydrationQueueKey)) *Dependencies_AddHydrationQueueItem_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 types.HydrationQueueKey
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(types.HydrationQueueKey)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_AddHydrationQueueItem_Call) Return() *Dependencies_AddHydrationQueueItem_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_AddHydrationQueueItem_Call) RunAndReturn(run func(key types.HydrationQueueKey)) *Dependencies_AddHydrationQueueItem_Call {
|
||||
_c.Run(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetProcessableAppProj provides a mock function for the type Dependencies
|
||||
func (_mock *Dependencies) GetProcessableAppProj(app *v1alpha1.Application) (*v1alpha1.AppProject, error) {
|
||||
ret := _mock.Called(app)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetProcessableAppProj")
|
||||
}
|
||||
|
||||
var r0 *v1alpha1.AppProject
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func(*v1alpha1.Application) (*v1alpha1.AppProject, error)); ok {
|
||||
return returnFunc(app)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(*v1alpha1.Application) *v1alpha1.AppProject); ok {
|
||||
r0 = returnFunc(app)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1alpha1.AppProject)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(*v1alpha1.Application) error); ok {
|
||||
r1 = returnFunc(app)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Dependencies_GetProcessableAppProj_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProcessableAppProj'
|
||||
type Dependencies_GetProcessableAppProj_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetProcessableAppProj is a helper method to define mock.On call
|
||||
// - app *v1alpha1.Application
|
||||
func (_e *Dependencies_Expecter) GetProcessableAppProj(app interface{}) *Dependencies_GetProcessableAppProj_Call {
|
||||
return &Dependencies_GetProcessableAppProj_Call{Call: _e.mock.On("GetProcessableAppProj", app)}
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetProcessableAppProj_Call) Run(run func(app *v1alpha1.Application)) *Dependencies_GetProcessableAppProj_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 *v1alpha1.Application
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(*v1alpha1.Application)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetProcessableAppProj_Call) Return(appProject *v1alpha1.AppProject, err error) *Dependencies_GetProcessableAppProj_Call {
|
||||
_c.Call.Return(appProject, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetProcessableAppProj_Call) RunAndReturn(run func(app *v1alpha1.Application) (*v1alpha1.AppProject, error)) *Dependencies_GetProcessableAppProj_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetProcessableApps provides a mock function for the type Dependencies
|
||||
func (_mock *Dependencies) GetProcessableApps() (*v1alpha1.ApplicationList, error) {
|
||||
ret := _mock.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetProcessableApps")
|
||||
}
|
||||
|
||||
var r0 *v1alpha1.ApplicationList
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func() (*v1alpha1.ApplicationList, error)); ok {
|
||||
return returnFunc()
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func() *v1alpha1.ApplicationList); ok {
|
||||
r0 = returnFunc()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1alpha1.ApplicationList)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = returnFunc()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Dependencies_GetProcessableApps_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProcessableApps'
|
||||
type Dependencies_GetProcessableApps_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetProcessableApps is a helper method to define mock.On call
|
||||
func (_e *Dependencies_Expecter) GetProcessableApps() *Dependencies_GetProcessableApps_Call {
|
||||
return &Dependencies_GetProcessableApps_Call{Call: _e.mock.On("GetProcessableApps")}
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetProcessableApps_Call) Run(run func()) *Dependencies_GetProcessableApps_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetProcessableApps_Call) Return(applicationList *v1alpha1.ApplicationList, err error) *Dependencies_GetProcessableApps_Call {
|
||||
_c.Call.Return(applicationList, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetProcessableApps_Call) RunAndReturn(run func() (*v1alpha1.ApplicationList, error)) *Dependencies_GetProcessableApps_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetRepoObjs provides a mock function for the type Dependencies
|
||||
func (_mock *Dependencies) GetRepoObjs(app *v1alpha1.Application, source v1alpha1.ApplicationSource, revision string, project *v1alpha1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error) {
|
||||
ret := _mock.Called(app, source, revision, project)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetRepoObjs")
|
||||
}
|
||||
|
||||
var r0 []*unstructured.Unstructured
|
||||
var r1 *apiclient.ManifestResponse
|
||||
var r2 error
|
||||
if returnFunc, ok := ret.Get(0).(func(*v1alpha1.Application, v1alpha1.ApplicationSource, string, *v1alpha1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error)); ok {
|
||||
return returnFunc(app, source, revision, project)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(*v1alpha1.Application, v1alpha1.ApplicationSource, string, *v1alpha1.AppProject) []*unstructured.Unstructured); ok {
|
||||
r0 = returnFunc(app, source, revision, project)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]*unstructured.Unstructured)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(*v1alpha1.Application, v1alpha1.ApplicationSource, string, *v1alpha1.AppProject) *apiclient.ManifestResponse); ok {
|
||||
r1 = returnFunc(app, source, revision, project)
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).(*apiclient.ManifestResponse)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(2).(func(*v1alpha1.Application, v1alpha1.ApplicationSource, string, *v1alpha1.AppProject) error); ok {
|
||||
r2 = returnFunc(app, source, revision, project)
|
||||
} else {
|
||||
r2 = ret.Error(2)
|
||||
}
|
||||
return r0, r1, r2
|
||||
}
|
||||
|
||||
// Dependencies_GetRepoObjs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRepoObjs'
|
||||
type Dependencies_GetRepoObjs_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetRepoObjs is a helper method to define mock.On call
|
||||
// - app *v1alpha1.Application
|
||||
// - source v1alpha1.ApplicationSource
|
||||
// - revision string
|
||||
// - project *v1alpha1.AppProject
|
||||
func (_e *Dependencies_Expecter) GetRepoObjs(app interface{}, source interface{}, revision interface{}, project interface{}) *Dependencies_GetRepoObjs_Call {
|
||||
return &Dependencies_GetRepoObjs_Call{Call: _e.mock.On("GetRepoObjs", app, source, revision, project)}
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetRepoObjs_Call) Run(run func(app *v1alpha1.Application, source v1alpha1.ApplicationSource, revision string, project *v1alpha1.AppProject)) *Dependencies_GetRepoObjs_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 *v1alpha1.Application
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(*v1alpha1.Application)
|
||||
}
|
||||
var arg1 v1alpha1.ApplicationSource
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(v1alpha1.ApplicationSource)
|
||||
}
|
||||
var arg2 string
|
||||
if args[2] != nil {
|
||||
arg2 = args[2].(string)
|
||||
}
|
||||
var arg3 *v1alpha1.AppProject
|
||||
if args[3] != nil {
|
||||
arg3 = args[3].(*v1alpha1.AppProject)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
arg2,
|
||||
arg3,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetRepoObjs_Call) Return(unstructureds []*unstructured.Unstructured, manifestResponse *apiclient.ManifestResponse, err error) *Dependencies_GetRepoObjs_Call {
|
||||
_c.Call.Return(unstructureds, manifestResponse, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetRepoObjs_Call) RunAndReturn(run func(app *v1alpha1.Application, source v1alpha1.ApplicationSource, revision string, project *v1alpha1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error)) *Dependencies_GetRepoObjs_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetWriteCredentials provides a mock function for the type Dependencies
|
||||
func (_mock *Dependencies) GetWriteCredentials(ctx context.Context, repoURL string, project string) (*v1alpha1.Repository, error) {
|
||||
ret := _mock.Called(ctx, repoURL, project)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetWriteCredentials")
|
||||
}
|
||||
|
||||
var r0 *v1alpha1.Repository
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, string, string) (*v1alpha1.Repository, error)); ok {
|
||||
return returnFunc(ctx, repoURL, project)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, string, string) *v1alpha1.Repository); ok {
|
||||
r0 = returnFunc(ctx, repoURL, project)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v1alpha1.Repository)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
|
||||
r1 = returnFunc(ctx, repoURL, project)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Dependencies_GetWriteCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetWriteCredentials'
|
||||
type Dependencies_GetWriteCredentials_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetWriteCredentials is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - repoURL string
|
||||
// - project string
|
||||
func (_e *Dependencies_Expecter) GetWriteCredentials(ctx interface{}, repoURL interface{}, project interface{}) *Dependencies_GetWriteCredentials_Call {
|
||||
return &Dependencies_GetWriteCredentials_Call{Call: _e.mock.On("GetWriteCredentials", ctx, repoURL, project)}
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetWriteCredentials_Call) Run(run func(ctx context.Context, repoURL string, project string)) *Dependencies_GetWriteCredentials_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 string
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(string)
|
||||
}
|
||||
var arg2 string
|
||||
if args[2] != nil {
|
||||
arg2 = args[2].(string)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
arg2,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetWriteCredentials_Call) Return(repository *v1alpha1.Repository, err error) *Dependencies_GetWriteCredentials_Call {
|
||||
_c.Call.Return(repository, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_GetWriteCredentials_Call) RunAndReturn(run func(ctx context.Context, repoURL string, project string) (*v1alpha1.Repository, error)) *Dependencies_GetWriteCredentials_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// PersistAppHydratorStatus provides a mock function for the type Dependencies
|
||||
func (_mock *Dependencies) PersistAppHydratorStatus(orig *v1alpha1.Application, newStatus *v1alpha1.SourceHydratorStatus) {
|
||||
_mock.Called(orig, newStatus)
|
||||
return
|
||||
}
|
||||
|
||||
// Dependencies_PersistAppHydratorStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PersistAppHydratorStatus'
|
||||
type Dependencies_PersistAppHydratorStatus_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// PersistAppHydratorStatus is a helper method to define mock.On call
|
||||
// - orig *v1alpha1.Application
|
||||
// - newStatus *v1alpha1.SourceHydratorStatus
|
||||
func (_e *Dependencies_Expecter) PersistAppHydratorStatus(orig interface{}, newStatus interface{}) *Dependencies_PersistAppHydratorStatus_Call {
|
||||
return &Dependencies_PersistAppHydratorStatus_Call{Call: _e.mock.On("PersistAppHydratorStatus", orig, newStatus)}
|
||||
}
|
||||
|
||||
func (_c *Dependencies_PersistAppHydratorStatus_Call) Run(run func(orig *v1alpha1.Application, newStatus *v1alpha1.SourceHydratorStatus)) *Dependencies_PersistAppHydratorStatus_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 *v1alpha1.Application
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(*v1alpha1.Application)
|
||||
}
|
||||
var arg1 *v1alpha1.SourceHydratorStatus
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(*v1alpha1.SourceHydratorStatus)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_PersistAppHydratorStatus_Call) Return() *Dependencies_PersistAppHydratorStatus_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_PersistAppHydratorStatus_Call) RunAndReturn(run func(orig *v1alpha1.Application, newStatus *v1alpha1.SourceHydratorStatus)) *Dependencies_PersistAppHydratorStatus_Call {
|
||||
_c.Run(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// RequestAppRefresh provides a mock function for the type Dependencies
|
||||
func (_mock *Dependencies) RequestAppRefresh(appName string, appNamespace string) error {
|
||||
ret := _mock.Called(appName, appNamespace)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RequestAppRefresh")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(string, string) error); ok {
|
||||
r0 = returnFunc(appName, appNamespace)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// Dependencies_RequestAppRefresh_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RequestAppRefresh'
|
||||
type Dependencies_RequestAppRefresh_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// RequestAppRefresh is a helper method to define mock.On call
|
||||
// - appName string
|
||||
// - appNamespace string
|
||||
func (_e *Dependencies_Expecter) RequestAppRefresh(appName interface{}, appNamespace interface{}) *Dependencies_RequestAppRefresh_Call {
|
||||
return &Dependencies_RequestAppRefresh_Call{Call: _e.mock.On("RequestAppRefresh", appName, appNamespace)}
|
||||
}
|
||||
|
||||
func (_c *Dependencies_RequestAppRefresh_Call) Run(run func(appName string, appNamespace string)) *Dependencies_RequestAppRefresh_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 string
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(string)
|
||||
}
|
||||
var arg1 string
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(string)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_RequestAppRefresh_Call) Return(err error) *Dependencies_RequestAppRefresh_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Dependencies_RequestAppRefresh_Call) RunAndReturn(run func(appName string, appNamespace string) error) *Dependencies_RequestAppRefresh_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
11
controller/hydrator/types/types.go
Normal file
11
controller/hydrator/types/types.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package types
|
||||
|
||||
// HydrationQueueKey is used to uniquely identify a hydration operation in the queue. If several applications request
|
||||
// hydration, but they have the same queue key, only one hydration operation will be performed.
|
||||
type HydrationQueueKey struct {
|
||||
// SourceRepoURL must be normalized with git.NormalizeGitURL to ensure that we don't double-queue a single hydration
|
||||
// operation because two apps have different URL formats.
|
||||
SourceRepoURL string
|
||||
SourceTargetRevision string
|
||||
DestinationBranch string
|
||||
}
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/controller/hydrator"
|
||||
"github.com/argoproj/argo-cd/v3/controller/hydrator/types"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
argoutil "github.com/argoproj/argo-cd/v3/util/argo"
|
||||
@@ -50,10 +50,19 @@ func (ctrl *ApplicationController) GetRepoObjs(origApp *appv1.Application, drySo
|
||||
delete(app.Annotations, appv1.AnnotationKeyManifestGeneratePaths)
|
||||
|
||||
// FIXME: use cache and revision cache
|
||||
objs, resp, _, err := ctrl.appStateManager.GetRepoObjs(app, drySources, appLabelKey, dryRevisions, true, true, false, project, false, false)
|
||||
objs, resp, _, err := ctrl.appStateManager.GetRepoObjs(app, drySources, appLabelKey, dryRevisions, true, true, false, project, false)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get repo objects: %w", err)
|
||||
}
|
||||
trackingMethod, err := ctrl.settingsMgr.GetTrackingMethod()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get tracking method: %w", err)
|
||||
}
|
||||
for _, obj := range objs {
|
||||
if err := argoutil.NewResourceTracking().RemoveAppInstance(obj, trackingMethod); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to remove the app instance value: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(resp) != 1 {
|
||||
return nil, nil, fmt.Errorf("expected one manifest response, got %d", len(resp))
|
||||
@@ -85,6 +94,6 @@ func (ctrl *ApplicationController) PersistAppHydratorStatus(orig *appv1.Applicat
|
||||
ctrl.persistAppStatus(orig, status)
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) AddHydrationQueueItem(key hydrator.HydrationQueueKey) {
|
||||
func (ctrl *ApplicationController) AddHydrationQueueItem(key types.HydrationQueueKey) {
|
||||
ctrl.hydrationQueue.AddRateLimited(key)
|
||||
}
|
||||
|
||||
79
controller/hydrator_dependencies_test.go
Normal file
79
controller/hydrator_dependencies_test.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v3/test"
|
||||
)
|
||||
|
||||
func TestGetRepoObjs(t *testing.T) {
|
||||
cm := test.NewConfigMap()
|
||||
cm.SetAnnotations(map[string]string{
|
||||
"custom-annotation": "custom-value",
|
||||
common.AnnotationInstallationID: "id", // tracking annotation should be removed
|
||||
common.AnnotationKeyAppInstance: "my-app", // tracking annotation should be removed
|
||||
})
|
||||
cmBytes, _ := json.Marshal(cm)
|
||||
|
||||
app := newFakeApp()
|
||||
// Enable the manifest-generate-paths annotation and set a synced revision
|
||||
app.SetAnnotations(map[string]string{v1alpha1.AnnotationKeyManifestGeneratePaths: "."})
|
||||
app.Status.Sync = v1alpha1.SyncStatus{
|
||||
Revision: "abc123",
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
}
|
||||
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{string(cmBytes)},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
}
|
||||
|
||||
ctrl := newFakeControllerWithResync(&data, time.Minute, nil, errors.New("this should not be called"))
|
||||
source := app.Spec.GetSource()
|
||||
source.RepoURL = "oci://example.com/argo/argo-cd"
|
||||
|
||||
objs, resp, err := ctrl.GetRepoObjs(app, source, "abc123", &v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: test.FakeArgoCDNamespace,
|
||||
},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
SourceRepos: []string{"*"},
|
||||
Destinations: []v1alpha1.ApplicationDestination{
|
||||
{
|
||||
Server: "*",
|
||||
Namespace: "*",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.Equal(t, "abc123", resp.Revision)
|
||||
assert.Len(t, objs, 1)
|
||||
|
||||
annotations := objs[0].GetAnnotations()
|
||||
|
||||
// only the tracking annotations set by Argo CD should be removed
|
||||
// and not the custom annotations set by user
|
||||
require.NotNil(t, annotations)
|
||||
assert.Equal(t, "custom-value", annotations["custom-annotation"])
|
||||
assert.NotContains(t, annotations, common.AnnotationInstallationID)
|
||||
assert.NotContains(t, annotations, common.AnnotationKeyAppInstance)
|
||||
|
||||
assert.Equal(t, "ConfigMap", objs[0].GetKind())
|
||||
}
|
||||
@@ -37,8 +37,7 @@ type Consistent struct {
|
||||
loadMap map[string]*Host
|
||||
totalLoad int64
|
||||
replicationFactor int
|
||||
|
||||
sync.RWMutex
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
type item struct {
|
||||
@@ -68,8 +67,8 @@ func NewWithReplicationFactor(replicationFactor int) *Consistent {
|
||||
}
|
||||
|
||||
func (c *Consistent) Add(server string) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if _, ok := c.loadMap[server]; ok {
|
||||
return
|
||||
@@ -87,8 +86,8 @@ func (c *Consistent) Add(server string) {
|
||||
// As described in https://en.wikipedia.org/wiki/Consistent_hashing
|
||||
// It returns ErrNoHosts if the ring has no servers in it.
|
||||
func (c *Consistent) Get(client string) (string, error) {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
if c.clients.Len() == 0 {
|
||||
return "", ErrNoHosts
|
||||
@@ -116,8 +115,8 @@ func (c *Consistent) Get(client string) (string, error) {
|
||||
// https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html
|
||||
// It returns ErrNoHosts if the ring has no hosts in it.
|
||||
func (c *Consistent) GetLeast(client string) (string, error) {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
if c.clients.Len() == 0 {
|
||||
return "", ErrNoHosts
|
||||
@@ -151,8 +150,8 @@ func (c *Consistent) GetLeast(client string) (string, error) {
|
||||
|
||||
// Sets the load of `server` to the given `load`
|
||||
func (c *Consistent) UpdateLoad(server string, load int64) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if _, ok := c.loadMap[server]; !ok {
|
||||
return
|
||||
@@ -166,8 +165,8 @@ func (c *Consistent) UpdateLoad(server string, load int64) {
|
||||
//
|
||||
// should only be used with if you obtained a host with GetLeast
|
||||
func (c *Consistent) Inc(server string) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if _, ok := c.loadMap[server]; !ok {
|
||||
return
|
||||
@@ -180,8 +179,8 @@ func (c *Consistent) Inc(server string) {
|
||||
//
|
||||
// should only be used with if you obtained a host with GetLeast
|
||||
func (c *Consistent) Done(server string) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if _, ok := c.loadMap[server]; !ok {
|
||||
return
|
||||
@@ -192,8 +191,8 @@ func (c *Consistent) Done(server string) {
|
||||
|
||||
// Deletes host from the ring
|
||||
func (c *Consistent) Remove(server string) bool {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
for i := 0; i < c.replicationFactor; i++ {
|
||||
h := c.hash(fmt.Sprintf("%s%d", server, i))
|
||||
@@ -206,8 +205,8 @@ func (c *Consistent) Remove(server string) bool {
|
||||
|
||||
// Return the list of servers in the ring
|
||||
func (c *Consistent) Servers() (servers []string) {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
for k := range c.loadMap {
|
||||
servers = append(servers, k)
|
||||
}
|
||||
|
||||
@@ -27,7 +27,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
statecache "github.com/argoproj/argo-cd/v3/controller/cache"
|
||||
@@ -71,9 +70,9 @@ type managedResource struct {
|
||||
|
||||
// AppStateManager defines methods which allow to compare application spec and actual application state.
|
||||
type AppStateManager interface {
|
||||
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error)
|
||||
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
|
||||
GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
|
||||
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
|
||||
SyncAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, state *v1alpha1.OperationState)
|
||||
GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
|
||||
}
|
||||
|
||||
// comparisonResult holds the state of an application after the reconciliation
|
||||
@@ -91,7 +90,8 @@ type comparisonResult struct {
|
||||
timings map[string]time.Duration
|
||||
diffResultList *diff.DiffResultList
|
||||
hasPostDeleteHooks bool
|
||||
revisionUpdated bool
|
||||
// revisionsMayHaveChanges indicates if there are any possibilities that the revisions contain changes
|
||||
revisionsMayHaveChanges bool
|
||||
}
|
||||
|
||||
func (res *comparisonResult) GetSyncStatus() *v1alpha1.SyncStatus {
|
||||
@@ -108,7 +108,6 @@ type appStateManager struct {
|
||||
db db.ArgoDB
|
||||
settingsMgr *settings.SettingsManager
|
||||
appclientset appclientset.Interface
|
||||
projInformer cache.SharedIndexInformer
|
||||
kubectl kubeutil.Kubectl
|
||||
onKubectlRun kubeutil.OnKubectlRunFunc
|
||||
repoClientset apiclient.Clientset
|
||||
@@ -128,7 +127,7 @@ type appStateManager struct {
|
||||
// task to the repo-server. It returns the list of generated manifests as unstructured
|
||||
// objects. It also returns the full response from all calls to the repo server as the
|
||||
// second argument.
|
||||
func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error) {
|
||||
func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error) {
|
||||
ts := stats.NewTimingStats()
|
||||
helmRepos, err := m.db.ListHelmRepositories(context.Background())
|
||||
if err != nil {
|
||||
@@ -219,14 +218,12 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
// Store the map of all sources having ref field into a map for applications with sources field
|
||||
// If it's for a rollback process, the refSources[*].targetRevision fields are the desired
|
||||
// revisions for the rollback
|
||||
refSources, err := argo.GetRefSources(context.Background(), sources, app.Spec.Project, m.db.GetRepository, revisions, rollback)
|
||||
refSources, err := argo.GetRefSources(context.Background(), sources, app.Spec.Project, m.db.GetRepository, revisions)
|
||||
if err != nil {
|
||||
return nil, nil, false, fmt.Errorf("failed to get ref sources: %w", err)
|
||||
}
|
||||
|
||||
revisionUpdated := false
|
||||
|
||||
atLeastOneRevisionIsNotPossibleToBeUpdated := false
|
||||
revisionsMayHaveChanges := false
|
||||
|
||||
keyManifestGenerateAnnotationVal, keyManifestGenerateAnnotationExists := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
|
||||
|
||||
@@ -238,10 +235,6 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
if err != nil {
|
||||
return nil, nil, false, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err)
|
||||
}
|
||||
kustomizeOptions, err := kustomizeSettings.GetOptions(source)
|
||||
if err != nil {
|
||||
return nil, nil, false, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err)
|
||||
}
|
||||
|
||||
syncedRevision := app.Status.Sync.Revision
|
||||
if app.Spec.HasMultipleSources() {
|
||||
@@ -260,7 +253,7 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
appNamespace = ""
|
||||
}
|
||||
|
||||
if !source.IsHelm() && syncedRevision != "" && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" {
|
||||
if !source.IsHelm() && !source.IsOCI() && syncedRevision != "" && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" {
|
||||
// Validate the manifest-generate-path annotation to avoid generating manifests if it has not changed.
|
||||
updateRevisionResult, err := repoClient.UpdateRevisionForPaths(context.Background(), &apiclient.UpdateRevisionForPathsRequest{
|
||||
Repo: repo,
|
||||
@@ -283,7 +276,7 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
return nil, nil, false, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
|
||||
}
|
||||
if updateRevisionResult.Changes {
|
||||
revisionUpdated = true
|
||||
revisionsMayHaveChanges = true
|
||||
}
|
||||
|
||||
// Generate manifests should use same revision as updateRevisionForPaths, because HEAD revision may be different between these two calls
|
||||
@@ -291,8 +284,8 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
revision = updateRevisionResult.Revision
|
||||
}
|
||||
} else {
|
||||
// revisionUpdated is set to true if at least one revision is not possible to be updated,
|
||||
atLeastOneRevisionIsNotPossibleToBeUpdated = true
|
||||
// revisionsMayHaveChanges is set to true if at least one revision is not possible to be updated
|
||||
revisionsMayHaveChanges = true
|
||||
}
|
||||
|
||||
repos := permittedHelmRepos
|
||||
@@ -318,7 +311,7 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: appNamespace,
|
||||
ApplicationSource: &source,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KustomizeOptions: kustomizeSettings,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: apiVersions,
|
||||
VerifySignature: verifySignature,
|
||||
@@ -353,13 +346,7 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
|
||||
logCtx.Info("GetRepoObjs stats")
|
||||
|
||||
// If a revision in any of the sources cannot be updated,
|
||||
// we should trigger self-healing whenever there are changes to the manifests.
|
||||
if atLeastOneRevisionIsNotPossibleToBeUpdated {
|
||||
revisionUpdated = true
|
||||
}
|
||||
|
||||
return targetObjs, manifestInfos, revisionUpdated, nil
|
||||
return targetObjs, manifestInfos, revisionsMayHaveChanges, nil
|
||||
}
|
||||
|
||||
// ResolveGitRevision will resolve the given revision to a full commit SHA. Only works for git.
|
||||
@@ -542,32 +529,37 @@ func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application
|
||||
// CompareAppState compares application git state to the live app state, using the specified
|
||||
// revision and supplied source. If revision or overrides are empty, then compares against
|
||||
// revision and overrides in the app spec.
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error) {
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
|
||||
ts := stats.NewTimingStats()
|
||||
appLabelKey, resourceOverrides, resFilter, installationID, trackingMethod, err := m.getComparisonSettings()
|
||||
logCtx := log.WithFields(applog.GetAppLogFields(app))
|
||||
|
||||
ts.AddCheckpoint("settings_ms")
|
||||
|
||||
// return unknown comparison result if basic comparison settings cannot be loaded
|
||||
if err != nil {
|
||||
if hasMultipleSources {
|
||||
return &comparisonResult{
|
||||
syncStatus: &v1alpha1.SyncStatus{
|
||||
ComparedTo: app.Spec.BuildComparedToStatus(),
|
||||
Status: v1alpha1.SyncStatusCodeUnknown,
|
||||
Revisions: revisions,
|
||||
},
|
||||
healthStatus: health.HealthStatusUnknown,
|
||||
}, nil
|
||||
// Build initial sync status
|
||||
syncStatus := &v1alpha1.SyncStatus{
|
||||
ComparedTo: v1alpha1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
IgnoreDifferences: app.Spec.IgnoreDifferences,
|
||||
},
|
||||
Status: v1alpha1.SyncStatusCodeUnknown,
|
||||
}
|
||||
if hasMultipleSources {
|
||||
syncStatus.ComparedTo.Sources = sources
|
||||
syncStatus.Revisions = revisions
|
||||
} else {
|
||||
if len(sources) > 0 {
|
||||
syncStatus.ComparedTo.Source = sources[0]
|
||||
} else {
|
||||
logCtx.Warn("CompareAppState: sources should not be empty")
|
||||
}
|
||||
return &comparisonResult{
|
||||
syncStatus: &v1alpha1.SyncStatus{
|
||||
ComparedTo: app.Spec.BuildComparedToStatus(),
|
||||
Status: v1alpha1.SyncStatusCodeUnknown,
|
||||
Revision: revisions[0],
|
||||
},
|
||||
healthStatus: health.HealthStatusUnknown,
|
||||
}, nil
|
||||
if len(revisions) > 0 {
|
||||
syncStatus.Revision = revisions[0]
|
||||
}
|
||||
}
|
||||
|
||||
appLabelKey, resourceOverrides, resFilter, installationID, trackingMethod, err := m.getComparisonSettings()
|
||||
ts.AddCheckpoint("settings_ms")
|
||||
if err != nil {
|
||||
// return unknown comparison result if basic comparison settings cannot be loaded
|
||||
return &comparisonResult{syncStatus: syncStatus, healthStatus: health.HealthStatusUnknown}, nil
|
||||
}
|
||||
|
||||
// When signature keys are defined in the project spec, we need to verify the signature on the Git revision
|
||||
@@ -582,7 +574,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logCtx := log.WithFields(applog.GetAppLogFields(app))
|
||||
logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)
|
||||
|
||||
var targetObjs []*unstructured.Unstructured
|
||||
@@ -591,7 +582,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
var manifestInfos []*apiclient.ManifestResponse
|
||||
targetNsExists := false
|
||||
|
||||
var revisionUpdated bool
|
||||
var revisionsMayHaveChanges bool
|
||||
|
||||
if len(localManifests) == 0 {
|
||||
// If the length of revisions is not same as the length of sources,
|
||||
@@ -603,7 +594,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
}
|
||||
}
|
||||
|
||||
targetObjs, manifestInfos, revisionUpdated, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, rollback, true)
|
||||
targetObjs, manifestInfos, revisionsMayHaveChanges, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, true)
|
||||
if err != nil {
|
||||
targetObjs = make([]*unstructured.Unstructured, 0)
|
||||
msg := "Failed to load target state: " + err.Error()
|
||||
@@ -951,32 +942,14 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
} else if app.HasChangedManagedNamespaceMetadata() {
|
||||
syncCode = v1alpha1.SyncStatusCodeOutOfSync
|
||||
}
|
||||
var revision string
|
||||
|
||||
if !hasMultipleSources && len(manifestRevisions) > 0 {
|
||||
revision = manifestRevisions[0]
|
||||
}
|
||||
var syncStatus v1alpha1.SyncStatus
|
||||
syncStatus.Status = syncCode
|
||||
|
||||
// Update the initial revision to the resolved manifest SHA
|
||||
if hasMultipleSources {
|
||||
syncStatus = v1alpha1.SyncStatus{
|
||||
ComparedTo: v1alpha1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
Sources: sources,
|
||||
IgnoreDifferences: app.Spec.IgnoreDifferences,
|
||||
},
|
||||
Status: syncCode,
|
||||
Revisions: manifestRevisions,
|
||||
}
|
||||
} else {
|
||||
syncStatus = v1alpha1.SyncStatus{
|
||||
ComparedTo: v1alpha1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
Source: app.Spec.GetSource(),
|
||||
IgnoreDifferences: app.Spec.IgnoreDifferences,
|
||||
},
|
||||
Status: syncCode,
|
||||
Revision: revision,
|
||||
}
|
||||
syncStatus.Revisions = manifestRevisions
|
||||
} else if len(manifestRevisions) > 0 {
|
||||
syncStatus.Revision = manifestRevisions[0]
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("sync_ms")
|
||||
@@ -996,15 +969,15 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
}
|
||||
|
||||
compRes := comparisonResult{
|
||||
syncStatus: &syncStatus,
|
||||
healthStatus: healthStatus,
|
||||
resources: resourceSummaries,
|
||||
managedResources: managedResources,
|
||||
reconciliationResult: reconciliation,
|
||||
diffConfig: diffConfig,
|
||||
diffResultList: diffResults,
|
||||
hasPostDeleteHooks: hasPostDeleteHooks,
|
||||
revisionUpdated: revisionUpdated,
|
||||
syncStatus: syncStatus,
|
||||
healthStatus: healthStatus,
|
||||
resources: resourceSummaries,
|
||||
managedResources: managedResources,
|
||||
reconciliationResult: reconciliation,
|
||||
diffConfig: diffConfig,
|
||||
diffResultList: diffResults,
|
||||
hasPostDeleteHooks: hasPostDeleteHooks,
|
||||
revisionsMayHaveChanges: revisionsMayHaveChanges,
|
||||
}
|
||||
|
||||
if hasMultipleSources {
|
||||
@@ -1062,7 +1035,7 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
|
||||
return false
|
||||
}
|
||||
|
||||
if !specEqualsCompareTo(app.Spec, app.Status.Sync.ComparedTo) {
|
||||
if !specEqualsCompareTo(app.Spec, sources, app.Status.Sync.ComparedTo) {
|
||||
log.WithField("useDiffCache", "false").Debug("specChanged")
|
||||
return false
|
||||
}
|
||||
@@ -1073,11 +1046,11 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
|
||||
|
||||
// specEqualsCompareTo compares the application spec to the comparedTo status. It normalizes the destination to match
|
||||
// the comparedTo destination before comparing. It does not mutate the original spec or comparedTo.
|
||||
func specEqualsCompareTo(spec v1alpha1.ApplicationSpec, comparedTo v1alpha1.ComparedTo) bool {
|
||||
func specEqualsCompareTo(spec v1alpha1.ApplicationSpec, sources []v1alpha1.ApplicationSource, comparedTo v1alpha1.ComparedTo) bool {
|
||||
// Make a copy to be sure we don't mutate the original.
|
||||
specCopy := spec.DeepCopy()
|
||||
currentSpec := specCopy.BuildComparedToStatus()
|
||||
return reflect.DeepEqual(comparedTo, currentSpec)
|
||||
compareToSpec := specCopy.BuildComparedToStatus(sources)
|
||||
return reflect.DeepEqual(comparedTo, compareToSpec)
|
||||
}
|
||||
|
||||
func (m *appStateManager) persistRevisionHistory(
|
||||
@@ -1139,7 +1112,6 @@ func NewAppStateManager(
|
||||
onKubectlRun kubeutil.OnKubectlRunFunc,
|
||||
settingsMgr *settings.SettingsManager,
|
||||
liveStateCache statecache.LiveStateCache,
|
||||
projInformer cache.SharedIndexInformer,
|
||||
metricsServer *metrics.MetricsServer,
|
||||
cache *appstatecache.Cache,
|
||||
statusRefreshTimeout time.Duration,
|
||||
@@ -1159,7 +1131,6 @@ func NewAppStateManager(
|
||||
repoClientset: repoClientset,
|
||||
namespace: namespace,
|
||||
settingsMgr: settingsMgr,
|
||||
projInformer: projInformer,
|
||||
metricsServer: metricsServer,
|
||||
statusRefreshTimeout: statusRefreshTimeout,
|
||||
resourceTracking: resourceTracking,
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/controller/testdata"
|
||||
@@ -52,7 +53,7 @@ func TestCompareAppStateEmpty(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -70,18 +71,18 @@ func TestCompareAppStateRepoError(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.Nil(t, compRes)
|
||||
require.EqualError(t, err, ErrCompareStateRepo.Error())
|
||||
|
||||
// expect to still get compare state error to as inside grace period
|
||||
compRes, err = ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err = ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.Nil(t, compRes)
|
||||
require.EqualError(t, err, ErrCompareStateRepo.Error())
|
||||
|
||||
time.Sleep(10 * time.Second)
|
||||
// expect to not get error as outside of grace period, but status should be unknown
|
||||
compRes, err = ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err = ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, v1alpha1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
|
||||
@@ -116,7 +117,7 @@ func TestCompareAppStateNamespaceMetadataDiffers(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -165,7 +166,7 @@ func TestCompareAppStateNamespaceMetadataDiffersToManifest(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -223,7 +224,7 @@ func TestCompareAppStateNamespaceMetadata(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -282,7 +283,7 @@ func TestCompareAppStateNamespaceMetadataIsTheSame(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -310,7 +311,7 @@ func TestCompareAppStateMissing(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -342,7 +343,7 @@ func TestCompareAppStateExtra(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, v1alpha1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
@@ -373,7 +374,7 @@ func TestCompareAppStateHook(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, v1alpha1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -405,7 +406,7 @@ func TestCompareAppStateSkipHook(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, v1alpha1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -436,7 +437,7 @@ func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
@@ -469,7 +470,7 @@ func TestCompareAppStateExtraHook(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
@@ -498,7 +499,7 @@ func TestAppRevisionsSingleSource(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources(), false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -538,7 +539,7 @@ func TestAppRevisionsMultiSource(t *testing.T) {
|
||||
app := newFakeMultiSourceApp()
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources(), false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, app.Spec.HasMultipleSources())
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -587,7 +588,7 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
@@ -624,7 +625,7 @@ func TestCompareAppStateManagedNamespaceMetadataWithLiveNsDoesNotGetPruned(t *te
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data, nil)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, []string{}, app.Spec.Sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, []string{}, app.Spec.Sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
@@ -678,7 +679,7 @@ func TestCompareAppStateWithManifestGeneratePath(t *testing.T) {
|
||||
ctrl := newFakeController(&data, nil)
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, app.Spec.GetSources(), false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, v1alpha1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -714,7 +715,7 @@ func TestSetHealth(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus)
|
||||
@@ -750,7 +751,7 @@ func TestPreserveStatusTimestamp(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus)
|
||||
@@ -787,7 +788,7 @@ func TestSetHealthSelfReferencedApp(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus)
|
||||
@@ -862,7 +863,7 @@ func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, health.HealthStatusUnknown, compRes.healthStatus)
|
||||
@@ -1011,7 +1012,7 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1038,7 +1039,7 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1070,7 +1071,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1097,7 +1098,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1124,7 +1125,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1151,7 +1152,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1181,7 +1182,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1211,7 +1212,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1241,7 +1242,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1271,7 +1272,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
@@ -1481,7 +1482,6 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
|
||||
func TestUseDiffCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type fixture struct {
|
||||
testName string
|
||||
noCache bool
|
||||
@@ -1493,7 +1493,6 @@ func TestUseDiffCache(t *testing.T) {
|
||||
expectedUseCache bool
|
||||
serverSideDiff bool
|
||||
}
|
||||
|
||||
manifestInfos := func(revision string) []*apiclient.ManifestResponse {
|
||||
return []*apiclient.ManifestResponse{
|
||||
{
|
||||
@@ -1509,15 +1508,16 @@ func TestUseDiffCache(t *testing.T) {
|
||||
},
|
||||
}
|
||||
}
|
||||
sources := func() []v1alpha1.ApplicationSource {
|
||||
return []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://some-repo.com",
|
||||
Path: "argocd/httpbin",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
source := func() v1alpha1.ApplicationSource {
|
||||
return v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://some-repo.com",
|
||||
Path: "argocd/httpbin",
|
||||
TargetRevision: "HEAD",
|
||||
}
|
||||
}
|
||||
sources := func() []v1alpha1.ApplicationSource {
|
||||
return []v1alpha1.ApplicationSource{source()}
|
||||
}
|
||||
|
||||
app := func(namespace string, revision string, refresh bool, a *v1alpha1.Application) *v1alpha1.Application {
|
||||
app := &v1alpha1.Application{
|
||||
@@ -1526,11 +1526,7 @@ func TestUseDiffCache(t *testing.T) {
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://some-repo.com",
|
||||
Path: "argocd/httpbin",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Source: ptr.To(source()),
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "httpbin",
|
||||
@@ -1548,11 +1544,7 @@ func TestUseDiffCache(t *testing.T) {
|
||||
Sync: v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
ComparedTo: v1alpha1.ComparedTo{
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://some-repo.com",
|
||||
Path: "argocd/httpbin",
|
||||
TargetRevision: "HEAD",
|
||||
},
|
||||
Source: source(),
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "httpbin",
|
||||
@@ -1577,7 +1569,6 @@ func TestUseDiffCache(t *testing.T) {
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
cases := []fixture{
|
||||
{
|
||||
testName: "will use diff cache",
|
||||
@@ -1594,7 +1585,7 @@ func TestUseDiffCache(t *testing.T) {
|
||||
testName: "will use diff cache with sync policy",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
sources: []v1alpha1.ApplicationSource{test.YamlToApplication(testdata.DiffCacheYaml).Status.Sync.ComparedTo.Source},
|
||||
app: test.YamlToApplication(testdata.DiffCacheYaml),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
@@ -1604,8 +1595,15 @@ func TestUseDiffCache(t *testing.T) {
|
||||
{
|
||||
testName: "will use diff cache for multisource",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
manifestInfos: append(manifestInfos("rev1"), manifestInfos("rev2")...),
|
||||
sources: v1alpha1.ApplicationSources{
|
||||
{
|
||||
RepoURL: "multisource repo1",
|
||||
},
|
||||
{
|
||||
RepoURL: "multisource repo2",
|
||||
},
|
||||
},
|
||||
app: app("httpbin", "", false, &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: nil,
|
||||
@@ -1743,16 +1741,13 @@ func TestUseDiffCache(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
// Given
|
||||
t.Parallel()
|
||||
logger, _ := logrustest.NewNullLogger()
|
||||
log := logrus.NewEntry(logger)
|
||||
|
||||
// When
|
||||
useDiffCache := useDiffCache(tc.noCache, tc.manifestInfos, tc.sources, tc.app, tc.manifestRevisions, tc.statusRefreshTimeout, tc.serverSideDiff, log)
|
||||
|
||||
// Then
|
||||
assert.Equal(t, tc.expectedUseCache, useDiffCache)
|
||||
})
|
||||
@@ -1775,11 +1770,11 @@ func TestCompareAppStateDefaultRevisionUpdated(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.True(t, compRes.revisionUpdated)
|
||||
assert.True(t, compRes.revisionsMayHaveChanges)
|
||||
}
|
||||
|
||||
func TestCompareAppStateRevisionUpdatedWithHelmSource(t *testing.T) {
|
||||
@@ -1798,11 +1793,11 @@ func TestCompareAppStateRevisionUpdatedWithHelmSource(t *testing.T) {
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.True(t, compRes.revisionUpdated)
|
||||
assert.True(t, compRes.revisionsMayHaveChanges)
|
||||
}
|
||||
|
||||
func Test_normalizeClusterScopeTracking(t *testing.T) {
|
||||
@@ -1825,3 +1820,31 @@ func Test_normalizeClusterScopeTracking(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.True(t, called, "normalization function should have called the callback function")
|
||||
}
|
||||
|
||||
func TestCompareAppState_DoesNotCallUpdateRevisionForPaths_ForOCI(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
// Enable the manifest-generate-paths annotation and set a synced revision
|
||||
app.SetAnnotations(map[string]string{v1alpha1.AnnotationKeyManifestGeneratePaths: "."})
|
||||
app.Status.Sync = v1alpha1.SyncStatus{
|
||||
Revision: "abc123",
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
}
|
||||
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
}
|
||||
ctrl := newFakeControllerWithResync(&data, time.Minute, nil, errors.New("this should not be called"))
|
||||
|
||||
source := app.Spec.GetSource()
|
||||
source.RepoURL = "oci://example.com/argo/argo-cd"
|
||||
sources := make([]v1alpha1.ApplicationSource, 0)
|
||||
sources = append(sources, source)
|
||||
|
||||
_, _, _, err := ctrl.appStateManager.GetRepoObjs(app, sources, "abc123", []string{"123456"}, false, false, false, &defaultProj, false)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
@@ -29,8 +28,8 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/openapi"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/controller/metrics"
|
||||
"github.com/argoproj/argo-cd/v3/controller/syncid"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
listersv1alpha1 "github.com/argoproj/argo-cd/v3/pkg/client/listers/application/v1alpha1"
|
||||
applog "github.com/argoproj/argo-cd/v3/util/app/log"
|
||||
"github.com/argoproj/argo-cd/v3/util/argo"
|
||||
"github.com/argoproj/argo-cd/v3/util/argo/diff"
|
||||
@@ -38,11 +37,8 @@ import (
|
||||
kubeutil "github.com/argoproj/argo-cd/v3/util/kube"
|
||||
logutils "github.com/argoproj/argo-cd/v3/util/log"
|
||||
"github.com/argoproj/argo-cd/v3/util/lua"
|
||||
"github.com/argoproj/argo-cd/v3/util/rand"
|
||||
)
|
||||
|
||||
var syncIdPrefix uint64
|
||||
|
||||
const (
|
||||
// EnvVarSyncWaveDelay is an environment variable which controls the delay in seconds between
|
||||
// each sync-wave
|
||||
@@ -90,124 +86,97 @@ func (m *appStateManager) getServerSideDiffDryRunApplier(cluster *v1alpha1.Clust
|
||||
return ops, cleanup, nil
|
||||
}
|
||||
|
||||
func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState) {
|
||||
func NewOperationState(operation v1alpha1.Operation) *v1alpha1.OperationState {
|
||||
return &v1alpha1.OperationState{
|
||||
Phase: common.OperationRunning,
|
||||
Operation: operation,
|
||||
StartedAt: metav1.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func newSyncOperationResult(app *v1alpha1.Application, op v1alpha1.SyncOperation) *v1alpha1.SyncOperationResult {
|
||||
syncRes := &v1alpha1.SyncOperationResult{}
|
||||
|
||||
if len(op.Sources) > 0 || op.Source != nil {
|
||||
// specific source specified in the SyncOperation
|
||||
if op.Source != nil {
|
||||
syncRes.Source = *op.Source
|
||||
}
|
||||
syncRes.Sources = op.Sources
|
||||
} else {
|
||||
// normal sync case, get sources from the spec
|
||||
syncRes.Sources = app.Spec.Sources
|
||||
syncRes.Source = app.Spec.GetSource()
|
||||
}
|
||||
|
||||
// Sync requests might be requested with ambiguous revisions (e.g. master, HEAD, v1.2.3).
|
||||
// This can change meaning when resuming operations (e.g a hook sync). After calculating a
|
||||
// concrete git commit SHA, the SHA is remembered in the status.operationState.syncResult field.
|
||||
// This ensures that when resuming an operation, we sync to the same revision that we initially
|
||||
// started with.
|
||||
var revision string
|
||||
var syncOp v1alpha1.SyncOperation
|
||||
var syncRes *v1alpha1.SyncOperationResult
|
||||
var source v1alpha1.ApplicationSource
|
||||
var sources []v1alpha1.ApplicationSource
|
||||
revisions := make([]string, 0)
|
||||
// concrete git commit SHA, the revision of the SyncOperationResult will be updated with the SHA
|
||||
syncRes.Revision = op.Revision
|
||||
syncRes.Revisions = op.Revisions
|
||||
return syncRes
|
||||
}
|
||||
|
||||
func (m *appStateManager) SyncAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, state *v1alpha1.OperationState) {
|
||||
syncId, err := syncid.Generate()
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("Failed to generate sync ID: %v", err)
|
||||
return
|
||||
}
|
||||
logEntry := log.WithFields(applog.GetAppLogFields(app)).WithField("syncId", syncId)
|
||||
|
||||
if state.Operation.Sync == nil {
|
||||
state.Phase = common.OperationFailed
|
||||
state.Phase = common.OperationError
|
||||
state.Message = "Invalid operation request: no operation specified"
|
||||
return
|
||||
}
|
||||
syncOp = *state.Operation.Sync
|
||||
|
||||
// validates if it should fail the sync if it finds shared resources
|
||||
hasSharedResource, sharedResourceMessage := hasSharedResourceCondition(app)
|
||||
if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") &&
|
||||
hasSharedResource {
|
||||
state.Phase = common.OperationFailed
|
||||
state.Message = "Shared resource found: " + sharedResourceMessage
|
||||
return
|
||||
syncOp := *state.Operation.Sync
|
||||
|
||||
if state.SyncResult == nil {
|
||||
state.SyncResult = newSyncOperationResult(app, syncOp)
|
||||
}
|
||||
|
||||
isMultiSourceRevision := app.Spec.HasMultipleSources()
|
||||
rollback := len(syncOp.Sources) > 0 || syncOp.Source != nil
|
||||
if rollback {
|
||||
// rollback case
|
||||
if len(state.Operation.Sync.Sources) > 0 {
|
||||
sources = state.Operation.Sync.Sources
|
||||
isMultiSourceRevision = true
|
||||
} else {
|
||||
source = *state.Operation.Sync.Source
|
||||
sources = make([]v1alpha1.ApplicationSource, 0)
|
||||
isMultiSourceRevision = false
|
||||
}
|
||||
} else {
|
||||
// normal sync case (where source is taken from app.spec.sources)
|
||||
if app.Spec.HasMultipleSources() {
|
||||
sources = app.Spec.Sources
|
||||
} else {
|
||||
// normal sync case (where source is taken from app.spec.source)
|
||||
source = app.Spec.GetSource()
|
||||
sources = make([]v1alpha1.ApplicationSource, 0)
|
||||
}
|
||||
}
|
||||
|
||||
if state.SyncResult != nil {
|
||||
syncRes = state.SyncResult
|
||||
revision = state.SyncResult.Revision
|
||||
revisions = append(revisions, state.SyncResult.Revisions...)
|
||||
} else {
|
||||
syncRes = &v1alpha1.SyncOperationResult{}
|
||||
// status.operationState.syncResult.source. must be set properly since auto-sync relies
|
||||
// on this information to decide if it should sync (if source is different than the last
|
||||
// sync attempt)
|
||||
if isMultiSourceRevision {
|
||||
syncRes.Sources = sources
|
||||
} else {
|
||||
syncRes.Source = source
|
||||
}
|
||||
state.SyncResult = syncRes
|
||||
}
|
||||
|
||||
// if we get here, it means we did not remember a commit SHA which we should be syncing to.
|
||||
// This typically indicates we are just about to begin a brand new sync/rollback operation.
|
||||
// Take the value in the requested operation. We will resolve this to a SHA later.
|
||||
if isMultiSourceRevision {
|
||||
if len(revisions) != len(sources) {
|
||||
revisions = syncOp.Revisions
|
||||
}
|
||||
} else {
|
||||
if revision == "" {
|
||||
revision = syncOp.Revision
|
||||
}
|
||||
}
|
||||
|
||||
proj, err := argo.GetAppProject(context.TODO(), app, listersv1alpha1.NewAppProjectLister(m.projInformer.GetIndexer()), m.namespace, m.settingsMgr, m.db)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
|
||||
return
|
||||
} else {
|
||||
isBlocked, err := syncWindowPreventsSync(app, proj)
|
||||
if isBlocked {
|
||||
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
|
||||
if state.Phase == common.OperationRunning {
|
||||
state.Message = "Sync operation blocked by sync window"
|
||||
if err != nil {
|
||||
state.Message = fmt.Sprintf("%s: %v", state.Message, err)
|
||||
}
|
||||
if isBlocked, err := syncWindowPreventsSync(app, project); isBlocked {
|
||||
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
|
||||
if state.Phase == common.OperationRunning {
|
||||
state.Message = "Sync operation blocked by sync window"
|
||||
if err != nil {
|
||||
state.Message = fmt.Sprintf("%s: %v", state.Message, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if !isMultiSourceRevision {
|
||||
sources = []v1alpha1.ApplicationSource{source}
|
||||
revisions = []string{revision}
|
||||
revisions := state.SyncResult.Revisions
|
||||
sources := state.SyncResult.Sources
|
||||
isMultiSourceSync := len(sources) > 0
|
||||
if !isMultiSourceSync {
|
||||
sources = []v1alpha1.ApplicationSource{state.SyncResult.Source}
|
||||
revisions = []string{state.SyncResult.Revision}
|
||||
}
|
||||
|
||||
// ignore error if CompareStateRepoError, this shouldn't happen as noRevisionCache is true
|
||||
compareResult, err := m.CompareAppState(app, proj, revisions, sources, false, true, syncOp.Manifests, isMultiSourceRevision, rollback)
|
||||
compareResult, err := m.CompareAppState(app, project, revisions, sources, false, true, syncOp.Manifests, isMultiSourceSync)
|
||||
if err != nil && !stderrors.Is(err, ErrCompareStateRepo) {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = err.Error()
|
||||
return
|
||||
}
|
||||
// We now have a concrete commit SHA. Save this in the sync result revision so that we remember
|
||||
// what we should be syncing to when resuming operations.
|
||||
|
||||
syncRes.Revision = compareResult.syncStatus.Revision
|
||||
syncRes.Revisions = compareResult.syncStatus.Revisions
|
||||
// We are now guaranteed to have a concrete commit SHA. Save this in the sync result revision so that we remember
|
||||
// what we should be syncing to when resuming operations.
|
||||
state.SyncResult.Revision = compareResult.syncStatus.Revision
|
||||
state.SyncResult.Revisions = compareResult.syncStatus.Revisions
|
||||
|
||||
// validates if it should fail the sync on that revision if it finds shared resources
|
||||
hasSharedResource, sharedResourceMessage := hasSharedResourceCondition(app)
|
||||
if syncOp.SyncOptions.HasOption("FailOnSharedResource=true") && hasSharedResource {
|
||||
state.Phase = common.OperationFailed
|
||||
state.Message = "Shared resource found: " + sharedResourceMessage
|
||||
return
|
||||
}
|
||||
|
||||
// If there are any comparison or spec errors error conditions do not perform the operation
|
||||
if errConditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{
|
||||
@@ -248,18 +217,8 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddUint64(&syncIdPrefix, 1)
|
||||
randSuffix, err := rand.String(5)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("Failed generate random sync ID: %v", err)
|
||||
return
|
||||
}
|
||||
syncId := fmt.Sprintf("%05d-%s", syncIdPrefix, randSuffix)
|
||||
|
||||
logEntry := log.WithFields(applog.GetAppLogFields(app)).WithField("syncId", syncId)
|
||||
initialResourcesRes := make([]common.ResourceSyncResult, len(syncRes.Resources))
|
||||
for i, res := range syncRes.Resources {
|
||||
initialResourcesRes := make([]common.ResourceSyncResult, len(state.SyncResult.Resources))
|
||||
for i, res := range state.SyncResult.Resources {
|
||||
key := kube.ResourceKey{Group: res.Group, Kind: res.Kind, Namespace: res.Namespace, Name: res.Name}
|
||||
initialResourcesRes[i] = common.ResourceSyncResult{
|
||||
ResourceKey: key,
|
||||
@@ -329,7 +288,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return
|
||||
}
|
||||
if impersonationEnabled {
|
||||
serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(proj, app)
|
||||
serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(project, app, destCluster)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("failed to find a matching service account to impersonate: %v", err)
|
||||
@@ -349,11 +308,11 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
sync.WithLogr(logutils.NewLogrusLogger(logEntry)),
|
||||
sync.WithHealthOverride(lua.ResourceHealthOverrides(resourceOverrides)),
|
||||
sync.WithPermissionValidator(func(un *unstructured.Unstructured, res *metav1.APIResource) error {
|
||||
if !proj.IsGroupKindPermitted(un.GroupVersionKind().GroupKind(), res.Namespaced) {
|
||||
return fmt.Errorf("resource %s:%s is not permitted in project %s", un.GroupVersionKind().Group, un.GroupVersionKind().Kind, proj.Name)
|
||||
if !project.IsGroupKindPermitted(un.GroupVersionKind().GroupKind(), res.Namespaced) {
|
||||
return fmt.Errorf("resource %s:%s is not permitted in project %s", un.GroupVersionKind().Group, un.GroupVersionKind().Kind, project.Name)
|
||||
}
|
||||
if res.Namespaced {
|
||||
permitted, err := proj.IsDestinationPermitted(destCluster, un.GetNamespace(), func(project string) ([]*v1alpha1.Cluster, error) {
|
||||
permitted, err := project.IsDestinationPermitted(destCluster, un.GetNamespace(), func(project string) ([]*v1alpha1.Cluster, error) {
|
||||
return m.db.GetProjectClusters(context.TODO(), project)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -361,7 +320,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
}
|
||||
|
||||
if !permitted {
|
||||
return fmt.Errorf("namespace %v is not permitted in project '%s'", un.GetNamespace(), proj.Name)
|
||||
return fmt.Errorf("namespace %v is not permitted in project '%s'", un.GetNamespace(), project.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -463,7 +422,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
logEntry.WithField("duration", time.Since(start)).Info("sync/terminate complete")
|
||||
|
||||
if !syncOp.DryRun && len(syncOp.Resources) == 0 && state.Phase.Successful() {
|
||||
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, compareResult.syncStatus.Revisions, compareResult.syncStatus.ComparedTo.Sources, isMultiSourceRevision, state.StartedAt, state.Operation.InitiatedBy)
|
||||
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, compareResult.syncStatus.ComparedTo.Source, compareResult.syncStatus.Revisions, compareResult.syncStatus.ComparedTo.Sources, isMultiSourceSync, state.StartedAt, state.Operation.InitiatedBy)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("failed to record sync to history: %v", err)
|
||||
@@ -612,7 +571,7 @@ func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject
|
||||
|
||||
// deriveServiceAccountToImpersonate determines the service account to be used for impersonation for the sync operation.
|
||||
// The returned service account will be fully qualified including namespace and the service account name in the format system:serviceaccount:<namespace>:<service_account>
|
||||
func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application) (string, error) {
|
||||
func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application, destCluster *v1alpha1.Cluster) (string, error) {
|
||||
// spec.Destination.Namespace is optional. If not specified, use the Application's
|
||||
// namespace
|
||||
serviceAccountNamespace := application.Spec.Destination.Namespace
|
||||
@@ -622,7 +581,7 @@ func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application
|
||||
// Loop through the destinationServiceAccounts and see if there is any destination that is a candidate.
|
||||
// if so, return the service account specified for that destination.
|
||||
for _, item := range project.Spec.DestinationServiceAccounts {
|
||||
dstServerMatched, err := glob.MatchWithError(item.Server, application.Spec.Destination.Server)
|
||||
dstServerMatched, err := glob.MatchWithError(item.Server, destCluster.Server)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/sync"
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/controller/testdata"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
@@ -49,7 +50,7 @@ func TestPersistRevisionHistory(t *testing.T) {
|
||||
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
}}
|
||||
ctrl.appStateManager.SyncAppState(app, opState)
|
||||
ctrl.appStateManager.SyncAppState(app, defaultProject, opState)
|
||||
// Ensure we record spec.source into sync result
|
||||
assert.Equal(t, app.Spec.GetSource(), opState.SyncResult.Source)
|
||||
|
||||
@@ -95,7 +96,7 @@ func TestPersistManagedNamespaceMetadataState(t *testing.T) {
|
||||
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
}}
|
||||
ctrl.appStateManager.SyncAppState(app, opState)
|
||||
ctrl.appStateManager.SyncAppState(app, defaultProject, opState)
|
||||
// Ensure we record spec.syncPolicy.managedNamespaceMetadata into sync result
|
||||
assert.Equal(t, app.Spec.SyncPolicy.ManagedNamespaceMetadata, opState.SyncResult.ManagedNamespaceMetadata)
|
||||
}
|
||||
@@ -138,7 +139,7 @@ func TestPersistRevisionHistoryRollback(t *testing.T) {
|
||||
Source: &source,
|
||||
},
|
||||
}}
|
||||
ctrl.appStateManager.SyncAppState(app, opState)
|
||||
ctrl.appStateManager.SyncAppState(app, defaultProject, opState)
|
||||
// Ensure we record opState's source into sync result
|
||||
assert.Equal(t, source, opState.SyncResult.Source)
|
||||
|
||||
@@ -181,7 +182,7 @@ func TestSyncComparisonError(t *testing.T) {
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
}}
|
||||
t.Setenv("ARGOCD_GPG_ENABLED", "true")
|
||||
ctrl.appStateManager.SyncAppState(app, opState)
|
||||
ctrl.appStateManager.SyncAppState(app, defaultProject, opState)
|
||||
|
||||
conditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{v1alpha1.ApplicationConditionComparisonError: true})
|
||||
assert.NotEmpty(t, conditions)
|
||||
@@ -193,14 +194,19 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
|
||||
|
||||
type fixture struct {
|
||||
application *v1alpha1.Application
|
||||
project *v1alpha1.AppProject
|
||||
controller *ApplicationController
|
||||
}
|
||||
|
||||
setup := func() *fixture {
|
||||
setup := func(liveObjects map[kube.ResourceKey]*unstructured.Unstructured) *fixture {
|
||||
app := newFakeApp()
|
||||
app.Status.OperationState = nil
|
||||
app.Status.History = nil
|
||||
|
||||
if liveObjects == nil {
|
||||
liveObjects = make(map[kube.ResourceKey]*unstructured.Unstructured)
|
||||
}
|
||||
|
||||
project := &v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: test.FakeArgoCDNamespace,
|
||||
@@ -208,6 +214,12 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
|
||||
},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
SignatureKeys: []v1alpha1.SignatureKey{{KeyID: "test"}},
|
||||
Destinations: []v1alpha1.ApplicationDestination{
|
||||
{
|
||||
Namespace: "*",
|
||||
Server: "*",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
data := fakeData{
|
||||
@@ -218,12 +230,13 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
managedLiveObjs: liveObjects,
|
||||
}
|
||||
ctrl := newFakeController(&data, nil)
|
||||
|
||||
return &fixture{
|
||||
application: app,
|
||||
project: project,
|
||||
controller: ctrl,
|
||||
}
|
||||
}
|
||||
@@ -231,13 +244,23 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
|
||||
t.Run("will fail the sync if finds shared resources", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
f := setup()
|
||||
syncErrorMsg := "deployment already applied by another application"
|
||||
condition := v1alpha1.ApplicationCondition{
|
||||
Type: v1alpha1.ApplicationConditionSharedResourceWarning,
|
||||
Message: syncErrorMsg,
|
||||
}
|
||||
f.application.Status.Conditions = append(f.application.Status.Conditions, condition)
|
||||
|
||||
sharedObject := kube.MustToUnstructured(&corev1.ConfigMap{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "ConfigMap",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "configmap1",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
common.AnnotationKeyAppInstance: "guestbook:/ConfigMap:default/configmap1",
|
||||
},
|
||||
},
|
||||
})
|
||||
liveObjects := make(map[kube.ResourceKey]*unstructured.Unstructured)
|
||||
liveObjects[kube.GetResourceKey(sharedObject)] = sharedObject
|
||||
f := setup(liveObjects)
|
||||
|
||||
// Sync with source unspecified
|
||||
opState := &v1alpha1.OperationState{Operation: v1alpha1.Operation{
|
||||
@@ -248,11 +271,11 @@ func TestAppStateManager_SyncAppState(t *testing.T) {
|
||||
}}
|
||||
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then
|
||||
assert.Equal(t, common.OperationFailed, opState.Phase)
|
||||
assert.Contains(t, opState.Message, syncErrorMsg)
|
||||
assert.Equal(t, synccommon.OperationFailed, opState.Phase)
|
||||
assert.Contains(t, opState.Message, "ConfigMap/configmap1 is part of applications fake-argocd-ns/my-app and guestbook")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -261,6 +284,7 @@ func TestSyncWindowDeniesSync(t *testing.T) {
|
||||
|
||||
type fixture struct {
|
||||
application *v1alpha1.Application
|
||||
project *v1alpha1.AppProject
|
||||
controller *ApplicationController
|
||||
}
|
||||
|
||||
@@ -299,6 +323,7 @@ func TestSyncWindowDeniesSync(t *testing.T) {
|
||||
|
||||
return &fixture{
|
||||
application: app,
|
||||
project: project,
|
||||
controller: ctrl,
|
||||
}
|
||||
}
|
||||
@@ -315,13 +340,13 @@ func TestSyncWindowDeniesSync(t *testing.T) {
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then
|
||||
assert.Equal(t, common.OperationRunning, opState.Phase)
|
||||
assert.Equal(t, synccommon.OperationRunning, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
}
|
||||
@@ -651,6 +676,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
type fixture struct {
|
||||
project *v1alpha1.AppProject
|
||||
application *v1alpha1.Application
|
||||
cluster *v1alpha1.Cluster
|
||||
}
|
||||
|
||||
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
|
||||
@@ -676,9 +702,14 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
cluster := &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Name: "test-cluster",
|
||||
}
|
||||
return &fixture{
|
||||
project: project,
|
||||
application: app,
|
||||
cluster: cluster,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -694,7 +725,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should be an error saying no valid match was found
|
||||
@@ -718,7 +749,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should be no error and should use the right service account for impersonation
|
||||
require.NoError(t, err)
|
||||
@@ -757,7 +788,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should be no error and should use the right service account for impersonation
|
||||
require.NoError(t, err)
|
||||
@@ -796,7 +827,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should be no error and it should use the first matching service account for impersonation
|
||||
require.NoError(t, err)
|
||||
@@ -830,7 +861,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should not be any error and should use the first matching glob pattern service account for impersonation
|
||||
require.NoError(t, err)
|
||||
@@ -865,7 +896,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should be an error saying no match was found
|
||||
require.EqualError(t, err, expectedErrMsg)
|
||||
@@ -893,7 +924,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should not be any error and the service account configured for with empty namespace should be used.
|
||||
require.NoError(t, err)
|
||||
@@ -927,7 +958,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should not be any error and the catch all service account should be returned
|
||||
require.NoError(t, err)
|
||||
@@ -951,7 +982,7 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there must be an error as the glob pattern is invalid.
|
||||
require.ErrorContains(t, err, "invalid glob pattern for destination namespace")
|
||||
@@ -985,7 +1016,35 @@ func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should not be any error and the service account with its namespace should be returned.
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("app destination name instead of server URL", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
|
||||
// Use destination name instead of server URL
|
||||
f.application.Spec.Destination.Server = ""
|
||||
f.application.Spec.Destination.Name = f.cluster.Name
|
||||
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should not be any error and the service account with its namespace should be returned.
|
||||
@@ -999,6 +1058,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
type fixture struct {
|
||||
project *v1alpha1.AppProject
|
||||
application *v1alpha1.Application
|
||||
cluster *v1alpha1.Cluster
|
||||
}
|
||||
|
||||
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
|
||||
@@ -1024,9 +1084,14 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
cluster := &v1alpha1.Cluster{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Name: "test-cluster",
|
||||
}
|
||||
return &fixture{
|
||||
project: project,
|
||||
application: app,
|
||||
cluster: cluster,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1062,7 +1127,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should not be any error and the right service account must be returned.
|
||||
require.NoError(t, err)
|
||||
@@ -1101,7 +1166,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should not be any error and first matching service account should be used
|
||||
require.NoError(t, err)
|
||||
@@ -1135,7 +1200,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should not be any error and the service account of the glob pattern, being the first match should be returned.
|
||||
@@ -1170,7 +1235,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, &v1alpha1.Cluster{Server: destinationServerURL})
|
||||
|
||||
// then, there an error with appropriate message must be returned
|
||||
require.EqualError(t, err, expectedErr)
|
||||
@@ -1204,7 +1269,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there should not be any error and the service account of the glob pattern match must be returned.
|
||||
require.NoError(t, err)
|
||||
@@ -1228,7 +1293,7 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
|
||||
// then, there must be an error as the glob pattern is invalid.
|
||||
require.ErrorContains(t, err, "invalid glob pattern for destination server")
|
||||
@@ -1262,17 +1327,46 @@ func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, &v1alpha1.Cluster{Server: destinationServerURL})
|
||||
|
||||
// then, there should not be any error and the service account with the given namespace prefix must be returned.
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("app destination name instead of server URL", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
|
||||
// Use destination name instead of server URL
|
||||
f.application.Spec.Destination.Server = ""
|
||||
f.application.Spec.Destination.Name = f.cluster.Name
|
||||
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application, f.cluster)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should not be any error and the service account with its namespace should be returned.
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncWithImpersonate(t *testing.T) {
|
||||
type fixture struct {
|
||||
application *v1alpha1.Application
|
||||
project *v1alpha1.AppProject
|
||||
controller *ApplicationController
|
||||
}
|
||||
|
||||
@@ -1322,6 +1416,7 @@ func TestSyncWithImpersonate(t *testing.T) {
|
||||
ctrl := newFakeController(&data, nil)
|
||||
return &fixture{
|
||||
application: app,
|
||||
project: project,
|
||||
controller: ctrl,
|
||||
}
|
||||
}
|
||||
@@ -1337,13 +1432,13 @@ func TestSyncWithImpersonate(t *testing.T) {
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then, app sync should fail with expected error message in operation state
|
||||
assert.Equal(t, common.OperationError, opState.Phase)
|
||||
assert.Equal(t, synccommon.OperationError, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
|
||||
@@ -1358,13 +1453,13 @@ func TestSyncWithImpersonate(t *testing.T) {
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then app sync should fail with expected error message in operation state
|
||||
assert.Equal(t, common.OperationError, opState.Phase)
|
||||
assert.Equal(t, synccommon.OperationError, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
|
||||
@@ -1379,13 +1474,13 @@ func TestSyncWithImpersonate(t *testing.T) {
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then app sync should not fail
|
||||
assert.Equal(t, common.OperationSucceeded, opState.Phase)
|
||||
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
|
||||
@@ -1400,13 +1495,38 @@ func TestSyncWithImpersonate(t *testing.T) {
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then application sync should pass using the control plane service account
|
||||
assert.Equal(t, common.OperationSucceeded, opState.Phase)
|
||||
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
|
||||
t.Run("app destination name instead of server URL", func(t *testing.T) {
|
||||
// given app sync impersonation feature is enabled with an application referring a project matching service account
|
||||
f := setup(true, test.FakeDestNamespace, "test-sa")
|
||||
opMessage := "successfully synced (no more tasks)"
|
||||
|
||||
opState := &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: synccommon.OperationRunning,
|
||||
}
|
||||
|
||||
f.application.Spec.Destination.Server = ""
|
||||
f.application.Spec.Destination.Name = "minikube"
|
||||
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then app sync should not fail
|
||||
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
}
|
||||
@@ -1416,6 +1536,7 @@ func TestClientSideApplyMigration(t *testing.T) {
|
||||
|
||||
type fixture struct {
|
||||
application *v1alpha1.Application
|
||||
project *v1alpha1.AppProject
|
||||
controller *ApplicationController
|
||||
}
|
||||
|
||||
@@ -1456,6 +1577,7 @@ func TestClientSideApplyMigration(t *testing.T) {
|
||||
|
||||
return &fixture{
|
||||
application: app,
|
||||
project: project,
|
||||
controller: ctrl,
|
||||
}
|
||||
}
|
||||
@@ -1471,10 +1593,10 @@ func TestClientSideApplyMigration(t *testing.T) {
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
}}
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then
|
||||
assert.Equal(t, common.OperationSucceeded, opState.Phase)
|
||||
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
|
||||
assert.Contains(t, opState.Message, "successfully synced")
|
||||
})
|
||||
|
||||
@@ -1489,10 +1611,10 @@ func TestClientSideApplyMigration(t *testing.T) {
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
}}
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then
|
||||
assert.Equal(t, common.OperationSucceeded, opState.Phase)
|
||||
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
|
||||
assert.Contains(t, opState.Message, "successfully synced")
|
||||
})
|
||||
|
||||
@@ -1507,10 +1629,10 @@ func TestClientSideApplyMigration(t *testing.T) {
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
}}
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
f.controller.appStateManager.SyncAppState(f.application, f.project, opState)
|
||||
|
||||
// then
|
||||
assert.Equal(t, common.OperationSucceeded, opState.Phase)
|
||||
assert.Equal(t, synccommon.OperationSucceeded, opState.Phase)
|
||||
assert.Contains(t, opState.Message, "successfully synced")
|
||||
})
|
||||
}
|
||||
|
||||
20
controller/syncid/id.go
Normal file
20
controller/syncid/id.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package syncid
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/rand"
|
||||
)
|
||||
|
||||
var globalCount = &atomic.Uint64{}
|
||||
|
||||
// Generate generates a new ID
|
||||
func Generate() (string, error) {
|
||||
randSuffix, err := rand.String(5)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate random suffix: %w", err)
|
||||
}
|
||||
prefix := globalCount.Add(1)
|
||||
return fmt.Sprintf("%05d-%s", prefix, randSuffix), nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user