mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-02-20 09:38:49 +01:00
Compare commits
288 Commits
hydrator
...
update-ver
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1d77740f95 | ||
|
|
58ded15863 | ||
|
|
8d0279895c | ||
|
|
6207fd0040 | ||
|
|
3875dde5cc | ||
|
|
17a535f6d4 | ||
|
|
180d6890af | ||
|
|
6ef7f61d9a | ||
|
|
c7937f101c | ||
|
|
03600ae7ac | ||
|
|
c6112c05fe | ||
|
|
49771c1d4f | ||
|
|
102853d31a | ||
|
|
10b9589f1c | ||
|
|
53dc116353 | ||
|
|
99aaf43bdb | ||
|
|
c8a62bb162 | ||
|
|
fd67e4970f | ||
|
|
2618ccca2d | ||
|
|
38e02ab9e8 | ||
|
|
2fe4536ed2 | ||
|
|
49163b09b1 | ||
|
|
c0f847f301 | ||
|
|
2e794fbbc5 | ||
|
|
a25c8a0eef | ||
|
|
c76a131b17 | ||
|
|
64a14a08e0 | ||
|
|
09eede0c17 | ||
|
|
f260510f38 | ||
|
|
079754c639 | ||
|
|
dc43124058 | ||
|
|
b6af657295 | ||
|
|
a3624a3f20 | ||
|
|
01ae20d1b3 | ||
|
|
89ef3563db | ||
|
|
831e4525c3 | ||
|
|
f8d6665c67 | ||
|
|
0680ddbdf9 | ||
|
|
ad36916ec4 | ||
|
|
af54ef8db5 | ||
|
|
68606c6caf | ||
|
|
6a8cb6eff0 | ||
|
|
d03ccf305c | ||
|
|
7f45c9e093 | ||
|
|
449e6939b2 | ||
|
|
99aab9a5f3 | ||
|
|
347f221adb | ||
|
|
1fcbe3f511 | ||
|
|
d417417c21 | ||
|
|
e7f98814a9 | ||
|
|
3f708b8b14 | ||
|
|
7bc333d193 | ||
|
|
e3b1d9327d | ||
|
|
deb07ee698 | ||
|
|
435989c07e | ||
|
|
2503eb32af | ||
|
|
be57dfe1fa | ||
|
|
e99c8b754b | ||
|
|
2076b4f73c | ||
|
|
5c595d8410 | ||
|
|
8340e1e43f | ||
|
|
1cddb8e607 | ||
|
|
262c8fa529 | ||
|
|
97a49a24cc | ||
|
|
a9a8d0e45f | ||
|
|
92de225ce5 | ||
|
|
a713e5023a | ||
|
|
ec60abd4d8 | ||
|
|
c6d9d50ee9 | ||
|
|
7244b8b40f | ||
|
|
8e81bb6c80 | ||
|
|
3bc2e1ae4c | ||
|
|
61f63f35ae | ||
|
|
5eb1f9bd16 | ||
|
|
4149f484bf | ||
|
|
0b2895977e | ||
|
|
99b30a87a6 | ||
|
|
9fc6ec116d | ||
|
|
f7f553f675 | ||
|
|
a9d9d07edd | ||
|
|
0f083c9e58 | ||
|
|
5392ca7e79 | ||
|
|
243ecc2f25 | ||
|
|
425b4087f3 | ||
|
|
74a367d10e | ||
|
|
e67a7b6674 | ||
|
|
ddf337e893 | ||
|
|
5540c37f3a | ||
|
|
60df9eb384 | ||
|
|
c6a414c7db | ||
|
|
d49e175c53 | ||
|
|
42c001dd14 | ||
|
|
ccc66cc54d | ||
|
|
f22c332d92 | ||
|
|
cb6fbbfdea | ||
|
|
81de487cf6 | ||
|
|
28f424f8f9 | ||
|
|
bf02881374 | ||
|
|
393f7fc7c1 | ||
|
|
48a03a9884 | ||
|
|
7abdd88d81 | ||
|
|
c20734df37 | ||
|
|
f5a202abb3 | ||
|
|
20e7f8edca | ||
|
|
ddab959958 | ||
|
|
aeb8b55fc0 | ||
|
|
c4709fbf5f | ||
|
|
022c4fd061 | ||
|
|
02df74192f | ||
|
|
ad399c0a88 | ||
|
|
f980187f17 | ||
|
|
da118ad6aa | ||
|
|
44d56954b7 | ||
|
|
e86258d8a5 | ||
|
|
8487a93931 | ||
|
|
76870db199 | ||
|
|
d60f8d8ba2 | ||
|
|
5e55d1d502 | ||
|
|
ebbd3d1321 | ||
|
|
b098f2152e | ||
|
|
a7bc623fef | ||
|
|
1de5f3b7fc | ||
|
|
14c1da6e40 | ||
|
|
bc4c4757fd | ||
|
|
ca7a08eb95 | ||
|
|
5776554819 | ||
|
|
878494f037 | ||
|
|
d8c773dd3d | ||
|
|
d2d9a37a0c | ||
|
|
ccc528aa9a | ||
|
|
031fb88fbb | ||
|
|
21a364158e | ||
|
|
47c7e46405 | ||
|
|
cb926d004d | ||
|
|
a2aaf7fd1d | ||
|
|
06237b3fee | ||
|
|
be90cc04fb | ||
|
|
5af95b1350 | ||
|
|
aa990d6696 | ||
|
|
71bbdccacf | ||
|
|
473665795c | ||
|
|
3661f09456 | ||
|
|
1759a4406b | ||
|
|
cc42d5f92d | ||
|
|
3136d08f44 | ||
|
|
6533a6f686 | ||
|
|
3f5b80f626 | ||
|
|
3d66b05899 | ||
|
|
b84f01eb3d | ||
|
|
09fdec4c6b | ||
|
|
01bbd91c9d | ||
|
|
d28229dc1c | ||
|
|
9d3409f7d5 | ||
|
|
ba67abed40 | ||
|
|
6dc7405cf9 | ||
|
|
c27091cb4f | ||
|
|
bd93902325 | ||
|
|
d9bda34605 | ||
|
|
ece68bd143 | ||
|
|
de35745fc0 | ||
|
|
bb43c5a83d | ||
|
|
01874d64de | ||
|
|
aa2bafd812 | ||
|
|
d3fbeec825 | ||
|
|
63b6565079 | ||
|
|
ef41eebd10 | ||
|
|
832fefb533 | ||
|
|
9c47a709fb | ||
|
|
1028808bb7 | ||
|
|
f071fdcfa3 | ||
|
|
e3e02f0064 | ||
|
|
6cdba1e536 | ||
|
|
a6d11354bb | ||
|
|
b9bd45b059 | ||
|
|
da43a20c6a | ||
|
|
de8ed2b9a7 | ||
|
|
f3b90ee517 | ||
|
|
f651ce7169 | ||
|
|
2b929ef2b6 | ||
|
|
06e85eed36 | ||
|
|
09e44e5f21 | ||
|
|
047f709c50 | ||
|
|
566e1d2ada | ||
|
|
5e294d84b7 | ||
|
|
da345ee5f6 | ||
|
|
b574cdc714 | ||
|
|
79b1e4bfc7 | ||
|
|
1f8add5907 | ||
|
|
843a2cdb09 | ||
|
|
3ee71e92dc | ||
|
|
f8dc8b470d | ||
|
|
8b2542ac76 | ||
|
|
a9095fe4e7 | ||
|
|
430f315924 | ||
|
|
9fbe05c7ae | ||
|
|
2a63a8b3d9 | ||
|
|
a43559d796 | ||
|
|
b656b9efb8 | ||
|
|
5834175dba | ||
|
|
0d0db3c110 | ||
|
|
aa14f76d38 | ||
|
|
1bbe17e988 | ||
|
|
0acecad8f8 | ||
|
|
00466c3094 | ||
|
|
2c8a574fff | ||
|
|
3feab7a668 | ||
|
|
233a14bb8b | ||
|
|
3a5b653fb1 | ||
|
|
ef573498c5 | ||
|
|
f597912a6f | ||
|
|
bb3d7730ec | ||
|
|
40186209cd | ||
|
|
60af76fd46 | ||
|
|
c1d3373b8f | ||
|
|
beb71a889d | ||
|
|
3cbb1522dd | ||
|
|
32ee00f1f4 | ||
|
|
4ec26ce399 | ||
|
|
9af0ff5233 | ||
|
|
0361fcb1e4 | ||
|
|
81444474d6 | ||
|
|
7b21eeefee | ||
|
|
e612199c68 | ||
|
|
f49a71c728 | ||
|
|
7fd9d02de7 | ||
|
|
843329174b | ||
|
|
ddd9d6a9f0 | ||
|
|
a0a5a186d9 | ||
|
|
42c2349d7c | ||
|
|
457bb1f23c | ||
|
|
58bab92294 | ||
|
|
e0eb80a45c | ||
|
|
59e0091130 | ||
|
|
86322b5621 | ||
|
|
615dc90999 | ||
|
|
7315033efc | ||
|
|
9b8b044874 | ||
|
|
3a267b8f24 | ||
|
|
3cdce8300c | ||
|
|
b6551cdce9 | ||
|
|
9fc5f14f40 | ||
|
|
085cc6cd73 | ||
|
|
5dd6020dda | ||
|
|
01798055e8 | ||
|
|
d1c74ac984 | ||
|
|
80f2043978 | ||
|
|
69ef68f7a8 | ||
|
|
7d53ef1c40 | ||
|
|
ea2527987c | ||
|
|
f28323b37d | ||
|
|
effbdc936c | ||
|
|
f97024965f | ||
|
|
44072bbdbf | ||
|
|
82003a2c3c | ||
|
|
bacdceda79 | ||
|
|
74af92fd95 | ||
|
|
de53d8eb61 | ||
|
|
d82a746dce | ||
|
|
e7beda0ecc | ||
|
|
014f4424b7 | ||
|
|
a0624f03e4 | ||
|
|
47bcb09c4a | ||
|
|
ab1d5b6f94 | ||
|
|
116180b182 | ||
|
|
6ed7410cf6 | ||
|
|
1ce5824ff4 | ||
|
|
5a7bf2ef66 | ||
|
|
6aa4de62c8 | ||
|
|
fe8dcb60fc | ||
|
|
b2c92c7b6e | ||
|
|
39cb06281f | ||
|
|
f7840c73b3 | ||
|
|
4835e5008a | ||
|
|
9fc93c670a | ||
|
|
afc2fe7172 | ||
|
|
31249489bf | ||
|
|
58543f3835 | ||
|
|
6296b178df | ||
|
|
ea362766db | ||
|
|
159da3c936 | ||
|
|
7601833527 | ||
|
|
b34628db9d | ||
|
|
d2231577c7 | ||
|
|
2cb7616d51 | ||
|
|
6dc559a3e5 | ||
|
|
33e0dda53c | ||
|
|
588b251acc | ||
|
|
3ef05b31ff |
33
.github/workflows/ci-build.yaml
vendored
33
.github/workflows/ci-build.yaml
vendored
@@ -31,7 +31,7 @@ jobs:
|
||||
docs: ${{ steps.filter.outputs.docs_any_changed }}
|
||||
steps:
|
||||
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- uses: tj-actions/changed-files@c65cd883420fd2eb864698a825fc4162dd94482c # v44.5.7
|
||||
- uses: tj-actions/changed-files@e9772d140489982e0e3704fea5ee93d536f1e275 # v45.0.1
|
||||
id: filter
|
||||
with:
|
||||
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
|
||||
@@ -81,7 +81,7 @@ jobs:
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
|
||||
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -151,7 +151,7 @@ jobs:
|
||||
run: |
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
|
||||
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -172,7 +172,7 @@ jobs:
|
||||
- name: Run all unit tests
|
||||
run: make test-local
|
||||
- name: Generate test results artifacts
|
||||
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||
with:
|
||||
name: test-results
|
||||
path: test-results
|
||||
@@ -215,7 +215,7 @@ jobs:
|
||||
run: |
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
|
||||
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -236,7 +236,7 @@ jobs:
|
||||
- name: Run all unit tests
|
||||
run: make test-race-local
|
||||
- name: Generate test results artifacts
|
||||
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||
with:
|
||||
name: race-results
|
||||
path: test-results/
|
||||
@@ -308,7 +308,7 @@ jobs:
|
||||
node-version: '21.6.1'
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
|
||||
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
|
||||
with:
|
||||
path: ui/node_modules
|
||||
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
|
||||
@@ -323,6 +323,8 @@ jobs:
|
||||
NODE_ENV: production
|
||||
NODE_ONLINE_ENV: online
|
||||
HOST_ARCH: amd64
|
||||
# If we're on the master branch, set the codecov token so that we upload bundle analysis
|
||||
CODECOV_TOKEN: ${{ github.ref == 'refs/heads/master' && secrets.CODECOV_TOKEN || '' }}
|
||||
working-directory: ui/
|
||||
- name: Run ESLint
|
||||
run: yarn lint
|
||||
@@ -346,7 +348,7 @@ jobs:
|
||||
fetch-depth: 0
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
|
||||
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
|
||||
with:
|
||||
path: ui/node_modules
|
||||
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
|
||||
@@ -377,6 +379,13 @@ jobs:
|
||||
fail_ci_if_error: true
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
- name: Upload test results to Codecov
|
||||
if: github.ref == 'refs/heads/master' && github.event_name == 'push' && github.repository == 'argoproj/argo-cd'
|
||||
uses: codecov/test-results-action@1b5b448b98e58ba90d1a1a1d9fcb72ca2263be46 # v1.0.0
|
||||
with:
|
||||
file: test-results/junit.xml
|
||||
fail_ci_if_error: true
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
- name: Perform static code analysis using SonarCloud
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -439,7 +448,7 @@ jobs:
|
||||
sudo chmod go-r $HOME/.kube/config
|
||||
kubectl version
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
|
||||
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -497,13 +506,13 @@ jobs:
|
||||
goreman run stop-all || echo "goreman trouble"
|
||||
sleep 30
|
||||
- name: Upload e2e coverage report
|
||||
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||
with:
|
||||
name: e2e-code-coverage
|
||||
path: /tmp/coverage
|
||||
if: ${{ matrix.k3s.latest }}
|
||||
- name: Upload e2e-server logs
|
||||
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||
with:
|
||||
name: e2e-server-k8s${{ matrix.k3s.version }}.log
|
||||
path: /tmp/e2e-server.log
|
||||
@@ -530,4 +539,4 @@ jobs:
|
||||
exit 0
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
2
.github/workflows/image-reuse.yaml
vendored
2
.github/workflows/image-reuse.yaml
vendored
@@ -143,7 +143,7 @@ jobs:
|
||||
|
||||
- name: Build and push container image
|
||||
id: image
|
||||
uses: docker/build-push-action@16ebe778df0e7752d2cfcbd924afdbbd89c1a755 #v6.6.1
|
||||
uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 #v6.7.0
|
||||
with:
|
||||
context: .
|
||||
platforms: ${{ inputs.platforms }}
|
||||
|
||||
2
.github/workflows/init-release.yaml
vendored
2
.github/workflows/init-release.yaml
vendored
@@ -64,7 +64,7 @@ jobs:
|
||||
git stash pop
|
||||
|
||||
- name: Create pull request
|
||||
uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6.1.0
|
||||
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
|
||||
with:
|
||||
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
|
||||
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
|
||||
|
||||
2
.github/workflows/release.yaml
vendored
2
.github/workflows/release.yaml
vendored
@@ -295,7 +295,7 @@ jobs:
|
||||
if: ${{ env.UPDATE_VERSION == 'true' }}
|
||||
|
||||
- name: Create PR to update VERSION on master branch
|
||||
uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6.1.0
|
||||
uses: peter-evans/create-pull-request@d121e62763d8cc35b5fb1710e887d6e69a52d3a4 # v7.0.2
|
||||
with:
|
||||
commit-message: Bump version in master
|
||||
title: "chore: Bump version in master"
|
||||
|
||||
2
.github/workflows/scorecard.yaml
vendored
2
.github/workflows/scorecard.yaml
vendored
@@ -54,7 +54,7 @@ jobs:
|
||||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6
|
||||
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
|
||||
2
.gitpod.Dockerfile
vendored
2
.gitpod.Dockerfile
vendored
@@ -1,4 +1,4 @@
|
||||
FROM gitpod/workspace-full@sha256:fbff2dce4236535b96de0e94622bbe9a44fba954ca064862004c34e3e08904df
|
||||
FROM gitpod/workspace-full@sha256:230285e0b949e6d728d384b2029a4111db7b9c87c182f22f32a0be9e36b225df
|
||||
|
||||
USER root
|
||||
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
issues:
|
||||
exclude:
|
||||
- SA1019
|
||||
- SA5011
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 0
|
||||
exclude-rules:
|
||||
- path: '(.+)_test\.go'
|
||||
linters:
|
||||
- unparam
|
||||
linters:
|
||||
enable:
|
||||
- errcheck
|
||||
@@ -17,6 +20,7 @@ linters:
|
||||
- misspell
|
||||
- staticcheck
|
||||
- testifylint
|
||||
- unparam
|
||||
- unused
|
||||
- whitespace
|
||||
linters-settings:
|
||||
|
||||
@@ -43,6 +43,7 @@ packages:
|
||||
ProjectGetter:
|
||||
RbacEnforcer:
|
||||
SettingsGetter:
|
||||
UserGetter:
|
||||
github.com/argoproj/argo-cd/v2/util/db:
|
||||
interfaces:
|
||||
ArgoDB:
|
||||
@@ -65,4 +66,4 @@ packages:
|
||||
SessionServiceClient:
|
||||
github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster:
|
||||
interfaces:
|
||||
ClusterServiceServer:
|
||||
ClusterServiceServer:
|
||||
|
||||
@@ -2,6 +2,7 @@ version: 2
|
||||
formats: all
|
||||
mkdocs:
|
||||
fail_on_warning: false
|
||||
configuration: mkdocs.yml
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/requirements.txt
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
# Changelog
|
||||
|
||||
## v2.13.6 (2025-03-21)
|
||||
|
||||
### Bug fixes
|
||||
- fix: handle annotated git tags correctly in repo server cache (#21548) (#21771)
|
||||
|
||||
## v2.4.8 (2022-07-29)
|
||||
|
||||
### Bug fixes
|
||||
|
||||
@@ -4,7 +4,7 @@ ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:3f85b7caad41a95462cf5b787d8
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.22.6@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa AS builder
|
||||
FROM docker.io/library/golang:1.23.1@sha256:2fe82a3f3e006b4f2a316c6a21f62b66e1330ae211d039bb8d1128e12ed57bf1 AS builder
|
||||
|
||||
RUN echo 'deb http://archive.debian.org/debian buster-backports main' >> /etc/apt/sources.list
|
||||
|
||||
@@ -83,7 +83,7 @@ WORKDIR /home/argocd
|
||||
####################################################################################################
|
||||
# Argo CD UI stage
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.3.0@sha256:5e4044ff6001d06e7748e35bfa4f80c73cf5f5a7360a1b782995e038a01b0585 AS argocd-ui
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:22.8.0@sha256:bd00c03095f7586432805dbf7989be10361d27987f93de904b1fc003949a4794 AS argocd-ui
|
||||
|
||||
WORKDIR /src
|
||||
COPY ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.22.6@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.23.1@sha256:2fe82a3f3e006b4f2a316c6a21f62b66e1330ae211d039bb8d1128e12ed57bf1 AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
|
||||
6
Makefile
6
Makefile
@@ -254,7 +254,7 @@ cli: test-tools-image
|
||||
|
||||
.PHONY: cli-local
|
||||
cli-local: clean-debug
|
||||
CGO_ENABLED=${CGO_FLAG} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build $(COVERAGE_FLAG) -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
CGO_ENABLED=${CGO_FLAG} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -gcflags="all=-N -l" $(COVERAGE_FLAG) -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${CLI_NAME} ./cmd
|
||||
|
||||
.PHONY: gen-resources-cli-local
|
||||
gen-resources-cli-local: clean-debug
|
||||
@@ -553,7 +553,7 @@ build-docs-local:
|
||||
|
||||
.PHONY: build-docs
|
||||
build-docs:
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build'
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install mkdocs; pip install $$(mkdocs get-deps); mkdocs build'
|
||||
|
||||
.PHONY: serve-docs-local
|
||||
serve-docs-local:
|
||||
@@ -561,7 +561,7 @@ serve-docs-local:
|
||||
|
||||
.PHONY: serve-docs
|
||||
serve-docs:
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install mkdocs; pip install $$(mkdocs get-deps); mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
|
||||
|
||||
# Verify that kubectl can connect to your K8s cluster from Docker
|
||||
.PHONY: verify-kube-connect
|
||||
|
||||
4
USERS.md
4
USERS.md
@@ -11,6 +11,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [7shifts](https://www.7shifts.com/)
|
||||
1. [Adevinta](https://www.adevinta.com/)
|
||||
1. [Adfinis](https://adfinis.com)
|
||||
1. [Adobe](https://www.adobe.com/)
|
||||
1. [Adventure](https://jp.adventurekk.com/)
|
||||
1. [Adyen](https://www.adyen.com)
|
||||
1. [AirQo](https://airqo.net/)
|
||||
@@ -29,6 +30,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Arctiq Inc.](https://www.arctiq.ca)
|
||||
2. [Arturia](https://www.arturia.com)
|
||||
1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/)
|
||||
1. [Augury](https://www.augury.com/)
|
||||
1. [Autodesk](https://www.autodesk.com)
|
||||
1. [Axians ACSP](https://www.axians.fr)
|
||||
1. [Axual B.V.](https://axual.com)
|
||||
@@ -39,6 +41,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Beez Innovation Labs](https://www.beezlabs.com/)
|
||||
1. [Bedag Informatik AG](https://www.bedag.ch/)
|
||||
1. [Beleza Na Web](https://www.belezanaweb.com.br/)
|
||||
1. [Believable Bots](https://believablebots.io)
|
||||
1. [BigPanda](https://bigpanda.io)
|
||||
1. [BioBox Analytics](https://biobox.io)
|
||||
1. [BMW Group](https://www.bmwgroup.com/)
|
||||
@@ -207,6 +210,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Moengage](https://www.moengage.com/)
|
||||
1. [Money Forward](https://corp.moneyforward.com/en/)
|
||||
1. [MOO Print](https://www.moo.com/)
|
||||
1. [Mozilla](https://www.mozilla.org)
|
||||
1. [MTN Group](https://www.mtn.com/)
|
||||
1. [Municipality of The Hague](https://www.denhaag.nl/)
|
||||
1. [My Job Glasses](https://myjobglasses.com)
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -31,11 +32,10 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
k8scache "k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
@@ -45,11 +45,11 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/controllers/template"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/metrics"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/status"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
"github.com/argoproj/argo-cd/v2/util/glob"
|
||||
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
@@ -90,7 +90,7 @@ type ApplicationSetReconciler struct {
|
||||
SCMRootCAPath string
|
||||
GlobalPreservedAnnotations []string
|
||||
GlobalPreservedLabels []string
|
||||
Cache cache.Cache
|
||||
Metrics *metrics.ApplicationsetMetrics
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -101,7 +101,7 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
|
||||
var applicationSetInfo argov1alpha1.ApplicationSet
|
||||
parametersGenerated := false
|
||||
|
||||
startTime := time.Now()
|
||||
if err := r.Get(ctx, req.NamespacedName, &applicationSetInfo); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
logCtx.WithError(err).Infof("unable to get ApplicationSet: '%v' ", err)
|
||||
@@ -109,6 +109,10 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
r.Metrics.ObserveReconcile(&applicationSetInfo, time.Since(startTime))
|
||||
}()
|
||||
|
||||
// Do not attempt to further reconcile the ApplicationSet if it is being deleted.
|
||||
if applicationSetInfo.ObjectMeta.DeletionTimestamp != nil {
|
||||
appsetName := applicationSetInfo.ObjectMeta.Name
|
||||
@@ -242,20 +246,8 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
|
||||
if r.EnableProgressiveSyncs {
|
||||
// trigger appropriate application syncs if RollingSync strategy is enabled
|
||||
if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") {
|
||||
validApps, err = r.syncValidApplications(logCtx, &applicationSetInfo, appSyncMap, appMap, validApps)
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argov1alpha1.ApplicationSetReasonSyncApplicationError,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(&applicationSetInfo) {
|
||||
validApps = r.syncValidApplications(logCtx, &applicationSetInfo, appSyncMap, appMap, validApps)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -409,8 +401,21 @@ func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.
|
||||
paramtersGeneratedCondition := getParametersGeneratedCondition(paramtersGenerated, condition.Message)
|
||||
resourceUpToDateCondition := getResourceUpToDateCondition(errOccurred, condition.Message, condition.Reason)
|
||||
|
||||
evaluatedTypes := map[argov1alpha1.ApplicationSetConditionType]bool{
|
||||
argov1alpha1.ApplicationSetConditionErrorOccurred: true,
|
||||
argov1alpha1.ApplicationSetConditionParametersGenerated: true,
|
||||
argov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
|
||||
}
|
||||
newConditions := []argov1alpha1.ApplicationSetCondition{errOccurredCondition, paramtersGeneratedCondition, resourceUpToDateCondition}
|
||||
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
|
||||
evaluatedTypes[argov1alpha1.ApplicationSetConditionRolloutProgressing] = true
|
||||
|
||||
if condition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing {
|
||||
newConditions = append(newConditions, condition)
|
||||
}
|
||||
}
|
||||
|
||||
needToUpdateConditions := false
|
||||
for _, condition := range newConditions {
|
||||
// do nothing if appset already has same condition
|
||||
@@ -421,28 +426,32 @@ func (r *ApplicationSetReconciler) setApplicationSetStatusCondition(ctx context.
|
||||
}
|
||||
}
|
||||
}
|
||||
evaluatedTypes := map[argov1alpha1.ApplicationSetConditionType]bool{
|
||||
argov1alpha1.ApplicationSetConditionErrorOccurred: true,
|
||||
argov1alpha1.ApplicationSetConditionParametersGenerated: true,
|
||||
argov1alpha1.ApplicationSetConditionResourcesUpToDate: true,
|
||||
}
|
||||
|
||||
if needToUpdateConditions || len(applicationSet.Status.Conditions) < 3 {
|
||||
if needToUpdateConditions || len(applicationSet.Status.Conditions) < len(newConditions) {
|
||||
// fetch updated Application Set object before updating it
|
||||
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
|
||||
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
|
||||
updatedAppset := &argov1alpha1.ApplicationSet{}
|
||||
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
}
|
||||
|
||||
applicationSet.Status.SetConditions(
|
||||
newConditions, evaluatedTypes,
|
||||
)
|
||||
updatedAppset.Status.SetConditions(
|
||||
newConditions, evaluatedTypes,
|
||||
)
|
||||
|
||||
// Update the newly fetched object with new set of conditions
|
||||
err := r.Client.Status().Update(ctx, applicationSet)
|
||||
// Update the newly fetched object with new set of conditions
|
||||
err := r.Client.Status().Update(ctx, updatedAppset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedAppset.DeepCopyInto(applicationSet)
|
||||
return nil
|
||||
})
|
||||
if err != nil && !apierr.IsNotFound(err) {
|
||||
return fmt.Errorf("unable to set application set condition: %w", err)
|
||||
}
|
||||
@@ -501,11 +510,9 @@ func (r *ApplicationSetReconciler) getMinRequeueAfter(applicationSetInfo *argov1
|
||||
}
|
||||
|
||||
func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
|
||||
return predicate.Funcs{
|
||||
CreateFunc: func(e event.CreateEvent) bool {
|
||||
return glob.MatchStringInList(namespaces, e.Object.GetNamespace(), false)
|
||||
},
|
||||
}
|
||||
return predicate.NewPredicateFuncs(func(object client.Object) bool {
|
||||
return utils.IsNamespaceAllowed(namespaces, object.GetNamespace())
|
||||
})
|
||||
}
|
||||
|
||||
func appControllerIndexer(rawObj client.Object) []string {
|
||||
@@ -546,25 +553,6 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) updateCache(ctx context.Context, obj client.Object, logger *log.Entry) {
|
||||
informer, err := r.Cache.GetInformer(ctx, obj)
|
||||
if err != nil {
|
||||
logger.Errorf("failed to get informer: %v", err)
|
||||
return
|
||||
}
|
||||
// The controller runtime abstract away informers creation
|
||||
// so unfortunately could not find any other way to access informer store.
|
||||
k8sInformer, ok := informer.(k8scache.SharedInformer)
|
||||
if !ok {
|
||||
logger.Error("informer is not a kubernetes informer")
|
||||
return
|
||||
}
|
||||
if err := k8sInformer.GetStore().Update(obj); err != nil {
|
||||
logger.Errorf("failed to update cache: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// createOrUpdateInCluster will create / update application resources in the cluster.
|
||||
// - For new applications, it will call create
|
||||
// - For existing application, it will call update
|
||||
@@ -662,7 +650,6 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
}
|
||||
continue
|
||||
}
|
||||
r.updateCache(ctx, found, appLog)
|
||||
|
||||
if action != controllerutil.OperationResultNone {
|
||||
// Don't pollute etcd with "unchanged Application" events
|
||||
@@ -829,7 +816,6 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
|
||||
if err := r.Client.Patch(ctx, updated, patch); err != nil {
|
||||
return fmt.Errorf("error updating finalizers: %w", err)
|
||||
}
|
||||
r.updateCache(ctx, updated, appLog)
|
||||
// Application must have updated list of finalizers
|
||||
updated.DeepCopyInto(app)
|
||||
|
||||
@@ -859,12 +845,9 @@ func (r *ApplicationSetReconciler) removeOwnerReferencesOnDeleteAppSet(ctx conte
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, logCtx *log.Entry, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
appDependencyList, appStepMap, err := r.buildAppDependencyList(logCtx, appset, desiredApplications)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build app dependency list: %w", err)
|
||||
}
|
||||
appDependencyList, appStepMap := r.buildAppDependencyList(logCtx, appset, desiredApplications)
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
|
||||
_, err := r.updateApplicationSetApplicationStatus(ctx, logCtx, &appset, applications, appStepMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
|
||||
}
|
||||
@@ -874,34 +857,27 @@ func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context,
|
||||
logCtx.Infof("step %v: %+v", i+1, step)
|
||||
}
|
||||
|
||||
appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build app sync map: %w", err)
|
||||
}
|
||||
|
||||
appSyncMap := r.buildAppSyncMap(appset, appDependencyList, appMap)
|
||||
logCtx.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appSyncMap, appStepMap, appMap)
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, logCtx, &appset, appSyncMap, appStepMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
|
||||
}
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusConditions(ctx, &appset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status conditions: %w", err)
|
||||
}
|
||||
_ = r.updateApplicationSetApplicationStatusConditions(ctx, &appset)
|
||||
|
||||
return appSyncMap, nil
|
||||
}
|
||||
|
||||
// this list tracks which Applications belong to each RollingUpdate step
|
||||
func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {
|
||||
func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int) {
|
||||
if applicationSet.Spec.Strategy == nil || applicationSet.Spec.Strategy.Type == "" || applicationSet.Spec.Strategy.Type == "AllAtOnce" {
|
||||
return [][]string{}, map[string]int{}, nil
|
||||
return [][]string{}, map[string]int{}
|
||||
}
|
||||
|
||||
steps := []argov1alpha1.ApplicationSetRolloutStep{}
|
||||
if progressiveSyncsStrategyEnabled(&applicationSet, "RollingSync") {
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(&applicationSet) {
|
||||
steps = applicationSet.Spec.Strategy.RollingSync.Steps
|
||||
}
|
||||
|
||||
@@ -942,7 +918,7 @@ func (r *ApplicationSetReconciler) buildAppDependencyList(logCtx *log.Entry, app
|
||||
}
|
||||
}
|
||||
|
||||
return appDependencyList, appStepMap, nil
|
||||
return appDependencyList, appStepMap
|
||||
}
|
||||
|
||||
func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
|
||||
@@ -966,7 +942,7 @@ func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov
|
||||
}
|
||||
|
||||
// this map is used to determine which stage of Applications are ready to be updated in the reconciler loop
|
||||
func (r *ApplicationSetReconciler) buildAppSyncMap(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
func (r *ApplicationSetReconciler) buildAppSyncMap(applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) map[string]bool {
|
||||
appSyncMap := map[string]bool{}
|
||||
syncEnabled := true
|
||||
|
||||
@@ -1003,11 +979,11 @@ func (r *ApplicationSetReconciler) buildAppSyncMap(ctx context.Context, applicat
|
||||
}
|
||||
}
|
||||
|
||||
return appSyncMap, nil
|
||||
return appSyncMap
|
||||
}
|
||||
|
||||
func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool {
|
||||
if progressiveSyncsStrategyEnabled(appset, "RollingSync") {
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(appset) {
|
||||
// we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes
|
||||
return isApplicationHealthy(app) && appStatus.Status == "Healthy"
|
||||
}
|
||||
@@ -1015,16 +991,8 @@ func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1al
|
||||
return true
|
||||
}
|
||||
|
||||
func progressiveSyncsStrategyEnabled(appset *argov1alpha1.ApplicationSet, strategyType string) bool {
|
||||
if appset.Spec.Strategy == nil || appset.Spec.Strategy.Type != strategyType {
|
||||
return false
|
||||
}
|
||||
|
||||
if strategyType == "RollingSync" && appset.Spec.Strategy.RollingSync == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
func progressiveSyncsRollingSyncStrategyEnabled(appset *argov1alpha1.ApplicationSet) bool {
|
||||
return appset.Spec.Strategy != nil && appset.Spec.Strategy.RollingSync != nil && appset.Spec.Strategy.Type == "RollingSync" && len(appset.Spec.Strategy.RollingSync.Steps) > 0
|
||||
}
|
||||
|
||||
func isApplicationHealthy(app argov1alpha1.Application) bool {
|
||||
@@ -1047,6 +1015,16 @@ func statusStrings(app argov1alpha1.Application) (string, string, string) {
|
||||
return healthStatusString, syncStatusString, operationPhaseString
|
||||
}
|
||||
|
||||
func getAppStep(appName string, appStepMap map[string]int) int {
|
||||
// if an application is not selected by any match expression, it defaults to step -1
|
||||
step := -1
|
||||
if appStep, ok := appStepMap[appName]; ok {
|
||||
// 1-based indexing
|
||||
step = appStep + 1
|
||||
}
|
||||
return step
|
||||
}
|
||||
|
||||
// check the status of each Application's status and promote Applications to the next status if needed
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
|
||||
now := metav1.Now()
|
||||
@@ -1066,7 +1044,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
LastTransitionTime: &now,
|
||||
Message: "No Application status found, defaulting status to Waiting.",
|
||||
Status: "Waiting",
|
||||
Step: fmt.Sprint(appStepMap[app.Name] + 1),
|
||||
Step: fmt.Sprint(getAppStep(app.Name, appStepMap)),
|
||||
TargetRevisions: app.Status.GetRevisions(),
|
||||
}
|
||||
} else {
|
||||
@@ -1076,13 +1054,13 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
// upgrade any existing AppStatus that might have been set by an older argo-cd version
|
||||
// note: currentAppStatus.TargetRevisions may be set to empty list earlier during migrations,
|
||||
// to prevent other usage of r.Client.Status().Update to fail before reaching here.
|
||||
if currentAppStatus.TargetRevisions == nil || len(currentAppStatus.TargetRevisions) == 0 {
|
||||
if len(currentAppStatus.TargetRevisions) == 0 {
|
||||
currentAppStatus.TargetRevisions = app.Status.GetRevisions()
|
||||
}
|
||||
}
|
||||
|
||||
appOutdated := false
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
|
||||
appOutdated = syncStatusString == "OutOfSync"
|
||||
}
|
||||
|
||||
@@ -1091,7 +1069,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Waiting"
|
||||
currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
currentAppStatus.TargetRevisions = app.Status.GetRevisions()
|
||||
}
|
||||
|
||||
@@ -1109,14 +1087,14 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Progressing"
|
||||
currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
}
|
||||
} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
|
||||
logCtx.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = "Progressing"
|
||||
currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1125,7 +1103,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = healthStatusString
|
||||
currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
}
|
||||
|
||||
if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
|
||||
@@ -1133,7 +1111,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
currentAppStatus.LastTransitionTime = &now
|
||||
currentAppStatus.Status = healthStatusString
|
||||
currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
|
||||
currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
|
||||
currentAppStatus.Step = fmt.Sprint(getAppStep(currentAppStatus.Application, appStepMap))
|
||||
}
|
||||
|
||||
appStatuses = append(appStatuses, currentAppStatus)
|
||||
@@ -1148,20 +1126,18 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx con
|
||||
}
|
||||
|
||||
// check Applications that are in Waiting status and promote them to Pending if needed
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
|
||||
now := metav1.Now()
|
||||
|
||||
appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))
|
||||
|
||||
// if we have no RollingUpdate steps, clear out the existing ApplicationStatus entries
|
||||
if applicationSet.Spec.Strategy != nil && applicationSet.Spec.Strategy.Type != "" && applicationSet.Spec.Strategy.Type != "AllAtOnce" {
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
|
||||
updateCountMap := []int{}
|
||||
totalCountMap := []int{}
|
||||
|
||||
length := 0
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
length = len(applicationSet.Spec.Strategy.RollingSync.Steps)
|
||||
}
|
||||
length := len(applicationSet.Spec.Strategy.RollingSync.Steps)
|
||||
|
||||
for s := 0; s < length; s++ {
|
||||
updateCountMap = append(updateCountMap, 0)
|
||||
totalCountMap = append(totalCountMap, 0)
|
||||
@@ -1171,17 +1147,15 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
totalCountMap[appStepMap[appStatus.Application]] += 1
|
||||
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
|
||||
updateCountMap[appStepMap[appStatus.Application]] += 1
|
||||
}
|
||||
if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
|
||||
updateCountMap[appStepMap[appStatus.Application]] += 1
|
||||
}
|
||||
}
|
||||
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
maxUpdateAllowed := true
|
||||
maxUpdate := &intstr.IntOrString{}
|
||||
if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
|
||||
if progressiveSyncsRollingSyncStrategyEnabled(applicationSet) {
|
||||
maxUpdate = applicationSet.Spec.Strategy.RollingSync.Steps[appStepMap[appStatus.Application]].MaxUpdate
|
||||
}
|
||||
|
||||
@@ -1199,7 +1173,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
|
||||
if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
|
||||
maxUpdateAllowed = false
|
||||
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
|
||||
logCtx.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, getAppStep(appStatus.Application, appStepMap), applicationSet.Name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1208,7 +1182,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
appStatus.LastTransitionTime = &now
|
||||
appStatus.Status = "Pending"
|
||||
appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
|
||||
appStatus.Step = fmt.Sprint(appStepMap[appStatus.Application] + 1)
|
||||
appStatus.Step = fmt.Sprint(getAppStep(appStatus.Application, appStepMap))
|
||||
|
||||
updateCountMap[appStepMap[appStatus.Application]] += 1
|
||||
}
|
||||
@@ -1225,7 +1199,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress
|
||||
return appStatuses, nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditions(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet) ([]argov1alpha1.ApplicationSetCondition, error) {
|
||||
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditions(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet) []argov1alpha1.ApplicationSetCondition {
|
||||
appSetProgressing := false
|
||||
for _, appStatus := range applicationSet.Status.ApplicationStatus {
|
||||
if appStatus.Status != "Healthy" {
|
||||
@@ -1250,7 +1224,7 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditio
|
||||
Message: "ApplicationSet Rollout Rollout started",
|
||||
Reason: argov1alpha1.ApplicationSetReasonApplicationSetModified,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, false,
|
||||
}, true,
|
||||
)
|
||||
} else if !appSetProgressing && appSetConditionProgressing {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
@@ -1260,11 +1234,11 @@ func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditio
|
||||
Message: "ApplicationSet Rollout Rollout complete",
|
||||
Reason: argov1alpha1.ApplicationSetReasonApplicationSetRolloutComplete,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusFalse,
|
||||
}, false,
|
||||
}, true,
|
||||
)
|
||||
}
|
||||
|
||||
return applicationSet.Status.Conditions, nil
|
||||
return applicationSet.Status.Conditions
|
||||
}
|
||||
|
||||
func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplicationStatus, application string) int {
|
||||
@@ -1290,8 +1264,29 @@ func (r *ApplicationSetReconciler) migrateStatus(ctx context.Context, appset *ar
|
||||
}
|
||||
|
||||
if update {
|
||||
if err := r.Client.Status().Update(ctx, appset); err != nil {
|
||||
return fmt.Errorf("unable to set application set status: %w", err)
|
||||
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
|
||||
updatedAppset := &argov1alpha1.ApplicationSet{}
|
||||
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
}
|
||||
|
||||
updatedAppset.Status.ApplicationStatus = appset.Status.ApplicationStatus
|
||||
|
||||
// Update the newly fetched object with new set of ApplicationStatus
|
||||
err := r.Client.Status().Update(ctx, updatedAppset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedAppset.DeepCopyInto(appset)
|
||||
return nil
|
||||
})
|
||||
if err != nil && !apierr.IsNotFound(err) {
|
||||
return fmt.Errorf("unable to set application set condition: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -1305,22 +1300,35 @@ func (r *ApplicationSetReconciler) updateResourcesStatus(ctx context.Context, lo
|
||||
for _, status := range statusMap {
|
||||
statuses = append(statuses, status)
|
||||
}
|
||||
sort.Slice(statuses, func(i, j int) bool {
|
||||
return statuses[i].Name < statuses[j].Name
|
||||
})
|
||||
appset.Status.Resources = statuses
|
||||
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
|
||||
updatedAppset := &argov1alpha1.ApplicationSet{}
|
||||
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
}
|
||||
|
||||
namespacedName := types.NamespacedName{Namespace: appset.Namespace, Name: appset.Name}
|
||||
err := r.Client.Status().Update(ctx, appset)
|
||||
updatedAppset.Status.Resources = appset.Status.Resources
|
||||
|
||||
// Update the newly fetched object with new status resources
|
||||
err := r.Client.Status().Update(ctx, updatedAppset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedAppset.DeepCopyInto(appset)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logCtx.Errorf("unable to set application set status: %v", err)
|
||||
return fmt.Errorf("unable to set application set status: %w", err)
|
||||
}
|
||||
|
||||
if err := r.Get(ctx, namespacedName, appset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1355,26 +1363,36 @@ func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Contex
|
||||
for i := range applicationStatuses {
|
||||
applicationSet.Status.SetApplicationStatus(applicationStatuses[i])
|
||||
}
|
||||
// DefaultRetry will retry 5 times with a backoff factor of 1, jitter of 0.1 and a duration of 10ms
|
||||
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
updatedAppset := &argov1alpha1.ApplicationSet{}
|
||||
if err := r.Get(ctx, namespacedName, updatedAppset); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
}
|
||||
|
||||
// Update the newly fetched object with new set of ApplicationStatus
|
||||
err := r.Client.Status().Update(ctx, applicationSet)
|
||||
updatedAppset.Status.ApplicationStatus = applicationSet.Status.ApplicationStatus
|
||||
|
||||
// Update the newly fetched object with new set of ApplicationStatus
|
||||
err := r.Client.Status().Update(ctx, updatedAppset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updatedAppset.DeepCopyInto(applicationSet)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logCtx.Errorf("unable to set application set status: %v", err)
|
||||
return fmt.Errorf("unable to set application set status: %w", err)
|
||||
}
|
||||
|
||||
if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
|
||||
if client.IgnoreNotFound(err) != nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("error fetching updated application set: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
|
||||
func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) []argov1alpha1.Application {
|
||||
rolloutApps := []argov1alpha1.Application{}
|
||||
for i := range validApps {
|
||||
pruneEnabled := false
|
||||
@@ -1395,15 +1413,15 @@ func (r *ApplicationSetReconciler) syncValidApplications(logCtx *log.Entry, appl
|
||||
// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
|
||||
if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
|
||||
logCtx.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
|
||||
validApps[i], _ = syncApplication(validApps[i], pruneEnabled)
|
||||
validApps[i] = syncApplication(validApps[i], pruneEnabled)
|
||||
}
|
||||
rolloutApps = append(rolloutApps, validApps[i])
|
||||
}
|
||||
return rolloutApps, nil
|
||||
return rolloutApps
|
||||
}
|
||||
|
||||
// used by the RollingSync Progressive Sync strategy to trigger a sync of a particular Application resource
|
||||
func syncApplication(application argov1alpha1.Application, prune bool) (argov1alpha1.Application, error) {
|
||||
func syncApplication(application argov1alpha1.Application, prune bool) argov1alpha1.Application {
|
||||
operation := argov1alpha1.Operation{
|
||||
InitiatedBy: argov1alpha1.OperationInitiator{
|
||||
Username: "applicationset-controller",
|
||||
@@ -1429,7 +1447,7 @@ func syncApplication(application argov1alpha1.Application, prune bool) (argov1al
|
||||
}
|
||||
application.Operation = &operation
|
||||
|
||||
return application, nil
|
||||
return application
|
||||
}
|
||||
|
||||
func getOwnsHandlerPredicates(enableProgressiveSyncs bool) predicate.Funcs {
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -24,29 +26,29 @@ type clusterSecretEventHandler struct {
|
||||
Client client.Client
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
|
||||
func (h *clusterSecretEventHandler) Create(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
h.queueRelatedAppGenerators(ctx, q, e.Object)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
|
||||
func (h *clusterSecretEventHandler) Update(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
h.queueRelatedAppGenerators(ctx, q, e.ObjectNew)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) {
|
||||
func (h *clusterSecretEventHandler) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
h.queueRelatedAppGenerators(ctx, q, e.Object)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) {
|
||||
func (h *clusterSecretEventHandler) Generic(ctx context.Context, e event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
h.queueRelatedAppGenerators(ctx, q, e.Object)
|
||||
}
|
||||
|
||||
// addRateLimitingInterface defines the Add method of workqueue.RateLimitingInterface, allow us to easily mock
|
||||
// it for testing purposes.
|
||||
type addRateLimitingInterface interface {
|
||||
Add(item interface{})
|
||||
type addRateLimitingInterface[T comparable] interface {
|
||||
Add(item T)
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Context, q addRateLimitingInterface, object client.Object) {
|
||||
func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Context, q addRateLimitingInterface[reconcile.Request], object client.Object) {
|
||||
// Check for label, lookup all ApplicationSets that might match the cluster, queue them all
|
||||
if object.GetLabels()[generators.ArgoCDSecretTypeLabel] != generators.ArgoCDSecretTypeCluster {
|
||||
return
|
||||
|
||||
@@ -551,24 +551,18 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
|
||||
handler.queueRelatedAppGenerators(context.Background(), &mockAddRateLimitingInterface, &test.secret)
|
||||
|
||||
assert.False(t, mockAddRateLimitingInterface.errorOccurred)
|
||||
assert.ElementsMatch(t, mockAddRateLimitingInterface.addedItems, test.expectedRequests)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Add checks the type, and adds it to the internal list of received additions
|
||||
func (obj *mockAddRateLimitingInterface) Add(item interface{}) {
|
||||
if req, ok := item.(ctrl.Request); ok {
|
||||
obj.addedItems = append(obj.addedItems, req)
|
||||
} else {
|
||||
obj.errorOccurred = true
|
||||
}
|
||||
func (obj *mockAddRateLimitingInterface) Add(item reconcile.Request) {
|
||||
obj.addedItems = append(obj.addedItems, item)
|
||||
}
|
||||
|
||||
type mockAddRateLimitingInterface struct {
|
||||
errorOccurred bool
|
||||
addedItems []ctrl.Request
|
||||
addedItems []reconcile.Request
|
||||
}
|
||||
|
||||
func TestNestedGeneratorHasClusterGenerator_NestedClusterGenerator(t *testing.T) {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
appsetmetrics "github.com/argoproj/argo-cd/v2/applicationset/metrics"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -60,7 +61,7 @@ func TestRequeueAfter(t *testing.T) {
|
||||
terminalGenerators := map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
|
||||
"Git": generators.NewGitGenerator(mockServer),
|
||||
"Git": generators.NewGitGenerator(mockServer, "namespace"),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), scmConfig),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
|
||||
"PullRequest": generators.NewPullRequestGenerator(k8sClient, scmConfig),
|
||||
@@ -89,11 +90,13 @@ func TestRequeueAfter(t *testing.T) {
|
||||
}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics(client)
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(0),
|
||||
Generators: topLevelGenerators,
|
||||
Metrics: metrics,
|
||||
}
|
||||
|
||||
type args struct {
|
||||
|
||||
@@ -218,7 +218,7 @@ func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.A
|
||||
res = append(res, params)
|
||||
}
|
||||
} else {
|
||||
log.Warningf("clusterDecisionResource status." + statusListKey + " missing")
|
||||
log.Warningf("clusterDecisionResource status.%s missing", statusListKey)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -346,7 +346,7 @@ func getMockClusterGenerator() Generator {
|
||||
func getMockGitGenerator() Generator {
|
||||
argoCDServiceMock := mocks.Repos{}
|
||||
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock)
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock, "namespace")
|
||||
return gitGenerator
|
||||
}
|
||||
|
||||
|
||||
@@ -24,13 +24,16 @@ import (
|
||||
var _ Generator = (*GitGenerator)(nil)
|
||||
|
||||
type GitGenerator struct {
|
||||
repos services.Repos
|
||||
repos services.Repos
|
||||
namespace string
|
||||
}
|
||||
|
||||
func NewGitGenerator(repos services.Repos) Generator {
|
||||
func NewGitGenerator(repos services.Repos, namespace string) Generator {
|
||||
g := &GitGenerator{
|
||||
repos: repos,
|
||||
repos: repos,
|
||||
namespace: namespace,
|
||||
}
|
||||
|
||||
return g
|
||||
}
|
||||
|
||||
@@ -59,21 +62,25 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
|
||||
|
||||
noRevisionCache := appSet.RefreshRequired()
|
||||
|
||||
var project string
|
||||
if strings.Contains(appSet.Spec.Template.Spec.Project, "{{") {
|
||||
project = appSetGenerator.Git.Template.Spec.Project
|
||||
} else {
|
||||
project = appSet.Spec.Template.Spec.Project
|
||||
}
|
||||
verifyCommit := false
|
||||
|
||||
appProject := &argoprojiov1alpha1.AppProject{}
|
||||
if err := client.Get(context.TODO(), types.NamespacedName{Name: appSet.Spec.Template.Spec.Project, Namespace: appSet.Namespace}, appProject); err != nil {
|
||||
return nil, fmt.Errorf("error getting project %s: %w", project, err)
|
||||
// When the project field is templated, the contents of the git repo are required to run the git generator and get the templated value,
|
||||
// but git generator cannot be called without verifying the commit signature.
|
||||
// In this case, we skip the signature verification.
|
||||
if !strings.Contains(appSet.Spec.Template.Spec.Project, "{{") {
|
||||
project := appSet.Spec.Template.Spec.Project
|
||||
appProject := &argoprojiov1alpha1.AppProject{}
|
||||
namespace := g.namespace
|
||||
if namespace == "" {
|
||||
namespace = appSet.Namespace
|
||||
}
|
||||
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: namespace}, appProject); err != nil {
|
||||
return nil, fmt.Errorf("error getting project %s: %w", project, err)
|
||||
}
|
||||
// we need to verify the signature on the Git revision if GPG is enabled
|
||||
verifyCommit = len(appProject.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled()
|
||||
}
|
||||
|
||||
// we need to verify the signature on the Git revision if GPG is enabled
|
||||
verifyCommit := appProject.Spec.SignatureKeys != nil && len(appProject.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled()
|
||||
|
||||
var err error
|
||||
var res []map[string]interface{}
|
||||
if len(appSetGenerator.Git.Directories) != 0 {
|
||||
|
||||
@@ -323,7 +323,7 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
|
||||
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock)
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
@@ -624,7 +624,7 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
|
||||
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock)
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
@@ -989,7 +989,7 @@ cluster:
|
||||
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock)
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
@@ -1345,7 +1345,7 @@ cluster:
|
||||
argoCDServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock)
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock, "")
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
@@ -1383,3 +1383,114 @@ cluster:
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitGenerator_GenerateParams(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
pathParamPrefix string
|
||||
repoApps []string
|
||||
repoPathsError error
|
||||
repoFileContents map[string][]byte
|
||||
values map[string]string
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
appset argoprojiov1alpha1.ApplicationSet
|
||||
callGetDirectories bool
|
||||
}{
|
||||
{
|
||||
name: "Signature Verification - ignores templated project field",
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
},
|
||||
repoPathsError: nil,
|
||||
appset: argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
PathParamPrefix: "",
|
||||
Values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
}},
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{
|
||||
Spec: argoprojiov1alpha1.ApplicationSpec{
|
||||
Project: "{{.project}}",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
callGetDirectories: true,
|
||||
expected: []map[string]interface{}{{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "path[0]": "app1", "values.foo": "bar"}},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Signature Verification - Checks for non-templated project field",
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
},
|
||||
repoPathsError: nil,
|
||||
appset: argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
PathParamPrefix: "",
|
||||
Values: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
}},
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{
|
||||
Spec: argoprojiov1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
callGetDirectories: false,
|
||||
expected: []map[string]interface{}{{"path": "app1", "path.basename": "app1", "path.basenameNormalized": "app1", "path[0]": "app1", "values.foo": "bar"}},
|
||||
expectedError: fmt.Errorf("error getting project project: appprojects.argoproj.io \"project\" not found"),
|
||||
},
|
||||
}
|
||||
for _, testCase := range cases {
|
||||
argoCDServiceMock := mocks.Repos{}
|
||||
|
||||
if testCase.callGetDirectories {
|
||||
argoCDServiceMock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testCase.repoApps, testCase.repoPathsError)
|
||||
}
|
||||
gitGenerator := NewGitGenerator(&argoCDServiceMock, "namespace")
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
appProject := argoprojiov1alpha1.AppProject{}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appProject).Build()
|
||||
|
||||
got, err := gitGenerator.GenerateParams(&testCase.appset.Spec.Generators[0], &testCase.appset, client)
|
||||
|
||||
if testCase.expectedError != nil {
|
||||
require.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, testCase.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.AssertExpectations(t)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1089,7 +1089,7 @@ func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
|
||||
repoServiceMock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
|
||||
"some/path.json": []byte("test: content"),
|
||||
}, nil)
|
||||
gitGenerator := NewGitGenerator(repoServiceMock)
|
||||
gitGenerator := NewGitGenerator(repoServiceMock, "")
|
||||
|
||||
matrixGenerator := NewMatrixGenerator(map[string]Generator{
|
||||
"List": listGeneratorMock,
|
||||
|
||||
@@ -694,7 +694,7 @@ func TestPluginGenerateParams(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
gotJson, err := json.Marshal(got)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(expectedJson), string(gotJson))
|
||||
assert.JSONEq(t, string(expectedJson), string(gotJson))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -168,7 +168,7 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Secret Bearer token: %w", err)
|
||||
}
|
||||
return pullrequest.NewBitbucketServiceBearerToken(ctx, providerConfig.API, appToken, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
|
||||
return pullrequest.NewBitbucketServiceBearerToken(ctx, appToken, providerConfig.API, providerConfig.Project, providerConfig.Repo, g.scmRootCAPath, providerConfig.Insecure, caCerts)
|
||||
} else if providerConfig.BasicAuth != nil {
|
||||
password, err := utils.GetSecretRef(ctx, g.client, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
|
||||
@@ -14,7 +14,7 @@ func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.In
|
||||
terminalGenerators := map[string]Generator{
|
||||
"List": NewListGenerator(),
|
||||
"Clusters": NewClusterGenerator(c, ctx, k8sClient, namespace),
|
||||
"Git": NewGitGenerator(argoCDService),
|
||||
"Git": NewGitGenerator(argoCDService, namespace),
|
||||
"SCMProvider": NewSCMProviderGenerator(c, scmConfig),
|
||||
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace),
|
||||
"PullRequest": NewPullRequestGenerator(c, scmConfig),
|
||||
|
||||
22
applicationset/metrics/fake.go
Normal file
22
applicationset/metrics/fake.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// Fake implementation for testing
|
||||
func NewFakeAppsetMetrics(client ctrlclient.WithWatch) *ApplicationsetMetrics {
|
||||
reconcileHistogram := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "argocd_appset_reconcile",
|
||||
Help: "Application reconciliation performance in seconds.",
|
||||
// Buckets can be set later on after observing median time
|
||||
},
|
||||
[]string{"name", "namespace"},
|
||||
)
|
||||
|
||||
return &ApplicationsetMetrics{
|
||||
reconcileHistogram: reconcileHistogram,
|
||||
}
|
||||
}
|
||||
131
applicationset/metrics/metrics.go
Normal file
131
applicationset/metrics/metrics.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
applisters "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
metricsutil "github.com/argoproj/argo-cd/v2/util/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
descAppsetLabels *prometheus.Desc
|
||||
descAppsetDefaultLabels = []string{"namespace", "name"}
|
||||
descAppsetInfo = prometheus.NewDesc(
|
||||
"argocd_appset_info",
|
||||
"Information about applicationset",
|
||||
append(descAppsetDefaultLabels, "resource_update_status"),
|
||||
nil,
|
||||
)
|
||||
|
||||
descAppsetGeneratedApps = prometheus.NewDesc(
|
||||
"argocd_appset_owned_applications",
|
||||
"Number of applications owned by the applicationset",
|
||||
descAppsetDefaultLabels,
|
||||
nil,
|
||||
)
|
||||
)
|
||||
|
||||
type ApplicationsetMetrics struct {
|
||||
reconcileHistogram *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
type appsetCollector struct {
|
||||
lister applisters.ApplicationSetLister
|
||||
// appsClientSet appclientset.Interface
|
||||
labels []string
|
||||
filter func(appset *argoappv1.ApplicationSet) bool
|
||||
}
|
||||
|
||||
func NewApplicationsetMetrics(appsetLister applisters.ApplicationSetLister, appsetLabels []string, appsetFilter func(appset *argoappv1.ApplicationSet) bool) ApplicationsetMetrics {
|
||||
reconcileHistogram := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "argocd_appset_reconcile",
|
||||
Help: "Application reconciliation performance in seconds.",
|
||||
// Buckets can be set later on after observing median time
|
||||
},
|
||||
descAppsetDefaultLabels,
|
||||
)
|
||||
|
||||
appsetCollector := newAppsetCollector(appsetLister, appsetLabels, appsetFilter)
|
||||
|
||||
// Register collectors and metrics
|
||||
metrics.Registry.MustRegister(reconcileHistogram)
|
||||
metrics.Registry.MustRegister(appsetCollector)
|
||||
|
||||
return ApplicationsetMetrics{
|
||||
reconcileHistogram: reconcileHistogram,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ApplicationsetMetrics) ObserveReconcile(appset *argoappv1.ApplicationSet, duration time.Duration) {
|
||||
m.reconcileHistogram.WithLabelValues(appset.Namespace, appset.Name).Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
func newAppsetCollector(lister applisters.ApplicationSetLister, labels []string, filter func(appset *argoappv1.ApplicationSet) bool) *appsetCollector {
|
||||
descAppsetDefaultLabels = []string{"namespace", "name"}
|
||||
|
||||
if len(labels) > 0 {
|
||||
descAppsetLabels = prometheus.NewDesc(
|
||||
"argocd_appset_labels",
|
||||
"Applicationset labels translated to Prometheus labels",
|
||||
append(descAppsetDefaultLabels, metricsutil.NormalizeLabels("label", labels)...),
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
return &appsetCollector{
|
||||
lister: lister,
|
||||
labels: labels,
|
||||
filter: filter,
|
||||
}
|
||||
}
|
||||
|
||||
// Describe implements the prometheus.Collector interface
|
||||
func (c *appsetCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- descAppsetInfo
|
||||
ch <- descAppsetGeneratedApps
|
||||
|
||||
if len(c.labels) > 0 {
|
||||
ch <- descAppsetLabels
|
||||
}
|
||||
}
|
||||
|
||||
// Collect implements the prometheus.Collector interface
|
||||
func (c *appsetCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
appsets, _ := c.lister.List(labels.NewSelector())
|
||||
|
||||
for _, appset := range appsets {
|
||||
if c.filter(appset) {
|
||||
collectAppset(appset, c.labels, ch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func collectAppset(appset *argoappv1.ApplicationSet, labelsToCollect []string, ch chan<- prometheus.Metric) {
|
||||
labelValues := make([]string, 0)
|
||||
commonLabelValues := []string{appset.Namespace, appset.Name}
|
||||
|
||||
for _, label := range labelsToCollect {
|
||||
labelValues = append(labelValues, appset.GetLabels()[label])
|
||||
}
|
||||
|
||||
resourceUpdateStatus := "Unknown"
|
||||
|
||||
for _, condition := range appset.Status.Conditions {
|
||||
if condition.Type == argoappv1.ApplicationSetConditionResourcesUpToDate {
|
||||
resourceUpdateStatus = condition.Reason
|
||||
}
|
||||
}
|
||||
|
||||
if len(labelsToCollect) > 0 {
|
||||
ch <- prometheus.MustNewConstMetric(descAppsetLabels, prometheus.GaugeValue, 1, append(commonLabelValues, labelValues...)...)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(descAppsetInfo, prometheus.GaugeValue, 1, appset.Namespace, appset.Name, resourceUpdateStatus)
|
||||
ch <- prometheus.MustNewConstMetric(descAppsetGeneratedApps, prometheus.GaugeValue, float64(len(appset.Status.Resources)), appset.Namespace, appset.Name)
|
||||
}
|
||||
256
applicationset/metrics/metrics_test.go
Normal file
256
applicationset/metrics/metrics_test.go
Normal file
@@ -0,0 +1,256 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
fake "sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
prometheus "github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
metricsutil "github.com/argoproj/argo-cd/v2/util/metrics"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
applicationsetNamespaces = []string{"argocd", "test-namespace1"}
|
||||
|
||||
filter = func(appset *argoappv1.ApplicationSet) bool {
|
||||
return utils.IsNamespaceAllowed(applicationsetNamespaces, appset.Namespace)
|
||||
}
|
||||
|
||||
collectedLabels = []string{"included/test"}
|
||||
)
|
||||
|
||||
const fakeAppsetList = `
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: test1
|
||||
namespace: argocd
|
||||
labels:
|
||||
included/test: test
|
||||
not-included.label/test: test
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
directories:
|
||||
- path: test/*
|
||||
repoURL: https://github.com/test/test.git
|
||||
revision: HEAD
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.path.basename}}'
|
||||
spec:
|
||||
destination:
|
||||
namespace: '{{.path.basename}}'
|
||||
server: https://kubernetes.default.svc
|
||||
project: default
|
||||
source:
|
||||
path: '{{.path.path}}'
|
||||
repoURL: https://github.com/test/test.git
|
||||
targetRevision: HEAD
|
||||
status:
|
||||
resources:
|
||||
- group: argoproj.io
|
||||
health:
|
||||
status: Missing
|
||||
kind: Application
|
||||
name: test-app1
|
||||
namespace: argocd
|
||||
status: OutOfSync
|
||||
version: v1alpha1
|
||||
- group: argoproj.io
|
||||
health:
|
||||
status: Missing
|
||||
kind: Application
|
||||
name: test-app2
|
||||
namespace: argocd
|
||||
status: OutOfSync
|
||||
version: v1alpha1
|
||||
conditions:
|
||||
- lastTransitionTime: "2024-01-01T00:00:00Z"
|
||||
message: Successfully generated parameters for all Applications
|
||||
reason: ApplicationSetUpToDate
|
||||
status: "False"
|
||||
type: ErrorOccurred
|
||||
- lastTransitionTime: "2024-01-01T00:00:00Z"
|
||||
message: Successfully generated parameters for all Applications
|
||||
reason: ParametersGenerated
|
||||
status: "True"
|
||||
type: ParametersGenerated
|
||||
- lastTransitionTime: "2024-01-01T00:00:00Z"
|
||||
message: ApplicationSet up to date
|
||||
reason: ApplicationSetUpToDate
|
||||
status: "True"
|
||||
type: ResourcesUpToDate
|
||||
---
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: test2
|
||||
namespace: argocd
|
||||
labels:
|
||||
not-included.label/test: test
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
directories:
|
||||
- path: test/*
|
||||
repoURL: https://github.com/test/test.git
|
||||
revision: HEAD
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.path.basename}}'
|
||||
spec:
|
||||
destination:
|
||||
namespace: '{{.path.basename}}'
|
||||
server: https://kubernetes.default.svc
|
||||
project: default
|
||||
source:
|
||||
path: '{{.path.path}}'
|
||||
repoURL: https://github.com/test/test.git
|
||||
targetRevision: HEAD
|
||||
---
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: should-be-filtered-out
|
||||
namespace: not-allowed
|
||||
spec:
|
||||
generators:
|
||||
- git:
|
||||
directories:
|
||||
- path: test/*
|
||||
repoURL: https://github.com/test/test.git
|
||||
revision: HEAD
|
||||
template:
|
||||
metadata:
|
||||
name: '{{.path.basename}}'
|
||||
spec:
|
||||
destination:
|
||||
namespace: '{{.path.basename}}'
|
||||
server: https://kubernetes.default.svc
|
||||
project: default
|
||||
source:
|
||||
path: '{{.path.path}}'
|
||||
repoURL: https://github.com/test/test.git
|
||||
targetRevision: HEAD
|
||||
`
|
||||
|
||||
func newFakeAppsets(fakeAppsetYAML string) []argoappv1.ApplicationSet {
|
||||
var results []argoappv1.ApplicationSet
|
||||
|
||||
appsetRawYamls := strings.Split(fakeAppsetYAML, "---")
|
||||
|
||||
for _, appsetRawYaml := range appsetRawYamls {
|
||||
var appset argoappv1.ApplicationSet
|
||||
err := yaml.Unmarshal([]byte(appsetRawYaml), &appset)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
results = append(results, appset)
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
func TestApplicationsetCollector(t *testing.T) {
|
||||
appsetList := newFakeAppsets(fakeAppsetList)
|
||||
client := initializeClient(appsetList)
|
||||
metrics.Registry = prometheus.NewRegistry()
|
||||
|
||||
appsetCollector := newAppsetCollector(utils.NewAppsetLister(client), collectedLabels, filter)
|
||||
|
||||
metrics.Registry.MustRegister(appsetCollector)
|
||||
req, err := http.NewRequest("GET", "/metrics", nil)
|
||||
require.NoError(t, err)
|
||||
rr := httptest.NewRecorder()
|
||||
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{})
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
assert.Equal(t, http.StatusOK, rr.Code)
|
||||
// Test correct appset_info and owned applications
|
||||
assert.Contains(t, rr.Body.String(), `
|
||||
argocd_appset_info{name="test1",namespace="argocd",resource_update_status="ApplicationSetUpToDate"} 1
|
||||
`)
|
||||
assert.Contains(t, rr.Body.String(), `
|
||||
argocd_appset_owned_applications{name="test1",namespace="argocd"} 2
|
||||
`)
|
||||
// Test labels collection - should not include labels not included in the list of collected labels and include the ones that do.
|
||||
assert.Contains(t, rr.Body.String(), `
|
||||
argocd_appset_labels{label_included_test="test",name="test1",namespace="argocd"} 1
|
||||
`)
|
||||
assert.NotContains(t, rr.Body.String(), normalizeLabel("not-included.label/test"))
|
||||
// If collected label is not present on the applicationset the value should be empty
|
||||
assert.Contains(t, rr.Body.String(), `
|
||||
argocd_appset_labels{label_included_test="",name="test2",namespace="argocd"} 1
|
||||
`)
|
||||
// If ResourcesUpToDate condition is not present on the applicationset the status should be reported as 'Unknown'
|
||||
assert.Contains(t, rr.Body.String(), `
|
||||
argocd_appset_info{name="test2",namespace="argocd",resource_update_status="Unknown"} 1
|
||||
`)
|
||||
// If there are no resources on the applicationset the owned application gague should return 0
|
||||
assert.Contains(t, rr.Body.String(), `
|
||||
argocd_appset_owned_applications{name="test2",namespace="argocd"} 0
|
||||
`)
|
||||
// Test that filter is working
|
||||
assert.NotContains(t, rr.Body.String(), `name="should-be-filtered-out"`)
|
||||
}
|
||||
|
||||
func TestObserveReconcile(t *testing.T) {
|
||||
appsetList := newFakeAppsets(fakeAppsetList)
|
||||
client := initializeClient(appsetList)
|
||||
metrics.Registry = prometheus.NewRegistry()
|
||||
|
||||
appsetMetrics := NewApplicationsetMetrics(utils.NewAppsetLister(client), collectedLabels, filter)
|
||||
|
||||
req, err := http.NewRequest("GET", "/metrics", nil)
|
||||
require.NoError(t, err)
|
||||
rr := httptest.NewRecorder()
|
||||
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{})
|
||||
appsetMetrics.ObserveReconcile(&appsetList[0], 5*time.Second)
|
||||
handler.ServeHTTP(rr, req)
|
||||
assert.Contains(t, rr.Body.String(), `
|
||||
argocd_appset_reconcile_sum{name="test1",namespace="argocd"} 5
|
||||
`)
|
||||
// If there are no resources on the applicationset the owned application gague should return 0
|
||||
assert.Contains(t, rr.Body.String(), `
|
||||
argocd_appset_reconcile_count{name="test1",namespace="argocd"} 1
|
||||
`)
|
||||
}
|
||||
|
||||
func initializeClient(appsets []argoappv1.ApplicationSet) ctrlclient.WithWatch {
|
||||
scheme := runtime.NewScheme()
|
||||
err := argoappv1.AddToScheme(scheme)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var clientObjects []ctrlclient.Object
|
||||
|
||||
for _, appset := range appsets {
|
||||
clientObjects = append(clientObjects, appset.DeepCopy())
|
||||
}
|
||||
|
||||
return fake.NewClientBuilder().WithScheme(scheme).WithObjects(clientObjects...).Build()
|
||||
}
|
||||
|
||||
func normalizeLabel(label string) string {
|
||||
return metricsutil.NormalizeLabels("label", []string{label})[0]
|
||||
}
|
||||
@@ -19,7 +19,7 @@ type BitbucketCloudPullRequest struct {
|
||||
ID int `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Source BitbucketCloudPullRequestSource `json:"source"`
|
||||
Author string `json:"author"`
|
||||
Author BitbucketCloudPullRequestAuthor `json:"author"`
|
||||
}
|
||||
|
||||
type BitbucketCloudPullRequestSource struct {
|
||||
@@ -35,6 +35,11 @@ type BitbucketCloudPullRequestSourceCommit struct {
|
||||
Hash string `json:"hash"`
|
||||
}
|
||||
|
||||
// Also have display_name and uuid, but don't plan to use them.
|
||||
type BitbucketCloudPullRequestAuthor struct {
|
||||
Nickname string `json:"nickname"`
|
||||
}
|
||||
|
||||
type PullRequestResponse struct {
|
||||
Page int32 `json:"page"`
|
||||
Size int32 `json:"size"`
|
||||
@@ -134,7 +139,7 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
|
||||
Title: pull.Title,
|
||||
Branch: pull.Source.Branch.Name,
|
||||
HeadSHA: pull.Source.Commit.Hash,
|
||||
Author: pull.Author,
|
||||
Author: pull.Author.Nickname,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -37,7 +37,9 @@ func defaultHandlerCloud(t *testing.T) func(http.ResponseWriter, *http.Request)
|
||||
"hash": "1a8dd249c04a"
|
||||
}
|
||||
},
|
||||
"author": "testName"
|
||||
"author": {
|
||||
"nickname": "testName"
|
||||
}
|
||||
}
|
||||
]
|
||||
}`)
|
||||
@@ -154,7 +156,9 @@ func TestListPullRequestPaginationCloud(t *testing.T) {
|
||||
"hash": "1a8dd249c04a"
|
||||
}
|
||||
},
|
||||
"author": "testName"
|
||||
"author": {
|
||||
"nickname": "testName"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 102,
|
||||
@@ -168,7 +172,9 @@ func TestListPullRequestPaginationCloud(t *testing.T) {
|
||||
"hash": "4cf807e67a6d"
|
||||
}
|
||||
},
|
||||
"author": "testName"
|
||||
"author": {
|
||||
"nickname": "testName"
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, r.Host))
|
||||
@@ -191,7 +197,9 @@ func TestListPullRequestPaginationCloud(t *testing.T) {
|
||||
"hash": "6344d9623e3b"
|
||||
}
|
||||
},
|
||||
"author": "testName"
|
||||
"author": {
|
||||
"nickname": "testName"
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, r.Host))
|
||||
@@ -339,7 +347,9 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
|
||||
"hash": "1a8dd249c04a"
|
||||
}
|
||||
},
|
||||
"author": "testName"
|
||||
"author": {
|
||||
"nickname": "testName"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 200,
|
||||
@@ -353,7 +363,9 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
|
||||
"hash": "4cf807e67a6d"
|
||||
}
|
||||
},
|
||||
"author": "testName"
|
||||
"author": {
|
||||
"nickname": "testName"
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, r.Host))
|
||||
@@ -376,7 +388,9 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
|
||||
"hash": "6344d9623e3b"
|
||||
}
|
||||
},
|
||||
"author": "testName"
|
||||
"author": {
|
||||
"nickname": "testName"
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, r.Host))
|
||||
|
||||
@@ -46,7 +46,7 @@ func (c *ExtendedClient) GetContents(repo *Repository, path string) (bool, error
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, fmt.Errorf(resp.Status)
|
||||
return false, fmt.Errorf("%s", resp.Status)
|
||||
}
|
||||
|
||||
var _ SCMProviderService = &BitBucketCloudProvider{}
|
||||
|
||||
63
applicationset/utils/applicationset_lister.go
Normal file
63
applicationset/utils/applicationset_lister.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
. "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
. "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
)
|
||||
|
||||
// Implements AppsetLister interface with controller-runtime client
|
||||
type AppsetLister struct {
|
||||
Client ctrlclient.Client
|
||||
}
|
||||
|
||||
func NewAppsetLister(client ctrlclient.Client) ApplicationSetLister {
|
||||
return &AppsetLister{Client: client}
|
||||
}
|
||||
|
||||
func (l *AppsetLister) List(selector labels.Selector) (ret []*ApplicationSet, err error) {
|
||||
return clientListAppsets(l.Client, ctrlclient.ListOptions{})
|
||||
}
|
||||
|
||||
// ApplicationSets returns an object that can list and get ApplicationSets.
|
||||
func (l *AppsetLister) ApplicationSets(namespace string) ApplicationSetNamespaceLister {
|
||||
return &appsetNamespaceLister{
|
||||
Client: l.Client,
|
||||
Namespace: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Implements ApplicationSetNamespaceLister
|
||||
type appsetNamespaceLister struct {
|
||||
Client ctrlclient.Client
|
||||
Namespace string
|
||||
}
|
||||
|
||||
func (n *appsetNamespaceLister) List(selector labels.Selector) (ret []*ApplicationSet, err error) {
|
||||
return clientListAppsets(n.Client, ctrlclient.ListOptions{Namespace: n.Namespace})
|
||||
}
|
||||
|
||||
func (n *appsetNamespaceLister) Get(name string) (*ApplicationSet, error) {
|
||||
appset := ApplicationSet{}
|
||||
err := n.Client.Get(context.TODO(), ctrlclient.ObjectKeyFromObject(&appset), &appset)
|
||||
return &appset, err
|
||||
}
|
||||
|
||||
func clientListAppsets(client ctrlclient.Client, listOptions ctrlclient.ListOptions) (ret []*ApplicationSet, err error) {
|
||||
var appsetlist ApplicationSetList
|
||||
var results []*ApplicationSet
|
||||
|
||||
err = client.List(context.TODO(), &appsetlist, &listOptions)
|
||||
|
||||
if err == nil {
|
||||
for _, appset := range appsetlist.Items {
|
||||
results = append(results, appset.DeepCopy())
|
||||
}
|
||||
}
|
||||
|
||||
return results, err
|
||||
}
|
||||
@@ -51,9 +51,12 @@ const (
|
||||
// if we used destination name we infer the server url
|
||||
// if we used both name and server then we return an invalid spec error
|
||||
func ValidateDestination(ctx context.Context, dest *appv1.ApplicationDestination, clientset kubernetes.Interface, argoCDNamespace string) error {
|
||||
if dest.IsServerInferred() && dest.IsNameInferred() {
|
||||
return fmt.Errorf("application destination can't have both name and server inferred: %s %s", dest.Name, dest.Server)
|
||||
}
|
||||
if dest.Name != "" {
|
||||
if dest.Server == "" {
|
||||
server, err := getDestinationServer(ctx, dest.Name, clientset, argoCDNamespace)
|
||||
server, err := getDestinationBy(ctx, dest.Name, clientset, argoCDNamespace, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to find destination server: %w", err)
|
||||
}
|
||||
@@ -61,14 +64,25 @@ func ValidateDestination(ctx context.Context, dest *appv1.ApplicationDestination
|
||||
return fmt.Errorf("application references destination cluster %s which does not exist", dest.Name)
|
||||
}
|
||||
dest.SetInferredServer(server)
|
||||
} else if !dest.IsServerInferred() {
|
||||
} else if !dest.IsServerInferred() && !dest.IsNameInferred() {
|
||||
return fmt.Errorf("application destination can't have both name and server defined: %s %s", dest.Name, dest.Server)
|
||||
}
|
||||
} else if dest.Server != "" {
|
||||
if dest.Name == "" {
|
||||
serverName, err := getDestinationBy(ctx, dest.Server, clientset, argoCDNamespace, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to find destination server: %w", err)
|
||||
}
|
||||
if serverName == "" {
|
||||
return fmt.Errorf("application references destination cluster %s which does not exist", dest.Server)
|
||||
}
|
||||
dest.SetInferredName(serverName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDestinationServer(ctx context.Context, clusterName string, clientset kubernetes.Interface, argoCDNamespace string) (string, error) {
|
||||
func getDestinationBy(ctx context.Context, cluster string, clientset kubernetes.Interface, argoCDNamespace string, byName bool) (string, error) {
|
||||
// settingsMgr := settings.NewSettingsManager(context.TODO(), clientset, namespace)
|
||||
// argoDB := db.NewDB(namespace, settingsMgr, clientset)
|
||||
// clusterList, err := argoDB.ListClusters(ctx)
|
||||
@@ -78,14 +92,17 @@ func getDestinationServer(ctx context.Context, clusterName string, clientset kub
|
||||
}
|
||||
var servers []string
|
||||
for _, c := range clusterList.Items {
|
||||
if c.Name == clusterName {
|
||||
if byName && c.Name == cluster {
|
||||
servers = append(servers, c.Server)
|
||||
}
|
||||
if !byName && c.Server == cluster {
|
||||
servers = append(servers, c.Name)
|
||||
}
|
||||
}
|
||||
if len(servers) > 1 {
|
||||
return "", fmt.Errorf("there are %d clusters with the same name: %v", len(servers), servers)
|
||||
} else if len(servers) == 0 {
|
||||
return "", fmt.Errorf("there are no clusters with this name: %s", clusterName)
|
||||
return "", fmt.Errorf("there are no clusters with this name: %s", cluster)
|
||||
}
|
||||
return servers[0], nil
|
||||
}
|
||||
@@ -132,9 +149,12 @@ func getLocalCluster(clientset kubernetes.Interface) *appv1.Cluster {
|
||||
initLocalCluster.Do(func() {
|
||||
info, err := clientset.Discovery().ServerVersion()
|
||||
if err == nil {
|
||||
// nolint:staticcheck
|
||||
localCluster.ServerVersion = fmt.Sprintf("%s.%s", info.Major, info.Minor)
|
||||
// nolint:staticcheck
|
||||
localCluster.ConnectionState = appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful}
|
||||
} else {
|
||||
// nolint:staticcheck
|
||||
localCluster.ConnectionState = appv1.ConnectionState{
|
||||
Status: appv1.ConnectionStatusFailed,
|
||||
Message: err.Error(),
|
||||
@@ -143,6 +163,7 @@ func getLocalCluster(clientset kubernetes.Interface) *appv1.Cluster {
|
||||
})
|
||||
cluster := localCluster.DeepCopy()
|
||||
now := metav1.Now()
|
||||
// nolint:staticcheck
|
||||
cluster.ConnectionState.ModifiedAt = &now
|
||||
return cluster
|
||||
}
|
||||
|
||||
@@ -92,7 +92,12 @@ func TestValidateDestination(t *testing.T) {
|
||||
Namespace: "default",
|
||||
}
|
||||
|
||||
appCond := ValidateDestination(context.Background(), &dest, nil, fakeNamespace)
|
||||
secret := createClusterSecret("my-secret", "minikube", "https://127.0.0.1:6443")
|
||||
objects := []runtime.Object{}
|
||||
objects = append(objects, secret)
|
||||
kubeclientset := fake.NewSimpleClientset(objects...)
|
||||
|
||||
appCond := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
require.NoError(t, appCond)
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
|
||||
@@ -229,7 +229,7 @@ spec:
|
||||
require.NoError(t, err)
|
||||
yamlExpected, err := yaml.Marshal(tc.expectedApp)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(yamlExpected), string(yamlFound))
|
||||
assert.YAMLEq(t, string(yamlExpected), string(yamlFound))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
argoappsv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/glob"
|
||||
)
|
||||
|
||||
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
|
||||
@@ -46,6 +47,10 @@ type Renderer interface {
|
||||
|
||||
type Render struct{}
|
||||
|
||||
func IsNamespaceAllowed(namespaces []string, namespace string) bool {
|
||||
return glob.MatchStringInList(namespaces, namespace, glob.REGEXP)
|
||||
}
|
||||
|
||||
func copyValueIntoUnexported(destination, value reflect.Value) {
|
||||
reflect.NewAt(destination.Type(), unsafe.Pointer(destination.UnsafeAddr())).
|
||||
Elem().
|
||||
@@ -268,7 +273,7 @@ func (r *Render) RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *
|
||||
// b) there IS a syncPolicy, but preserveResourcesOnDeletion is set to false
|
||||
// See TestRenderTemplateParamsFinalizers in util_test.go for test-based definition of behaviour
|
||||
if (syncPolicy == nil || !syncPolicy.PreserveResourcesOnDeletion) &&
|
||||
(replacedTmpl.ObjectMeta.Finalizers == nil || len(replacedTmpl.ObjectMeta.Finalizers) == 0) {
|
||||
len(replacedTmpl.ObjectMeta.Finalizers) == 0 {
|
||||
replacedTmpl.ObjectMeta.Finalizers = []string{"resources-finalizer.argocd.argoproj.io"}
|
||||
}
|
||||
|
||||
|
||||
186
applicationset/webhook/testdata/github-commit-event-feature-branch.json
vendored
Normal file
186
applicationset/webhook/testdata/github-commit-event-feature-branch.json
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
{
|
||||
"ref": "refs/heads/env/dev",
|
||||
"before": "d5c1ffa8e294bc18c639bfb4e0df499251034414",
|
||||
"after": "63738bb582c8b540af7bcfc18f87c575c3ed66e0",
|
||||
"created": false,
|
||||
"deleted": false,
|
||||
"forced": true,
|
||||
"base_ref": null,
|
||||
"compare": "https://github.com/org/repo/compare/d5c1ffa8e294...63738bb582c8",
|
||||
"commits": [
|
||||
{
|
||||
"id": "63738bb582c8b540af7bcfc18f87c575c3ed66e0",
|
||||
"tree_id": "64897da445207e409ad05af93b1f349ad0a4ee19",
|
||||
"distinct": true,
|
||||
"message": "Add staging-argocd-demo environment",
|
||||
"timestamp": "2018-05-04T15:40:02-07:00",
|
||||
"url": "https://github.com/org/repo/commit/63738bb582c8b540af7bcfc18f87c575c3ed66e0",
|
||||
"author": {
|
||||
"name": "Jesse Suen",
|
||||
"email": "Jesse_Suen@example.com",
|
||||
"username": "org"
|
||||
},
|
||||
"committer": {
|
||||
"name": "Jesse Suen",
|
||||
"email": "Jesse_Suen@example.com",
|
||||
"username": "org"
|
||||
},
|
||||
"added": [
|
||||
"ksapps/test-app/environments/staging-argocd-demo/main.jsonnet",
|
||||
"ksapps/test-app/environments/staging-argocd-demo/params.libsonnet"
|
||||
],
|
||||
"removed": [
|
||||
|
||||
],
|
||||
"modified": [
|
||||
"ksapps/test-app/app.yaml"
|
||||
]
|
||||
}
|
||||
],
|
||||
"head_commit": {
|
||||
"id": "63738bb582c8b540af7bcfc18f87c575c3ed66e0",
|
||||
"tree_id": "64897da445207e409ad05af93b1f349ad0a4ee19",
|
||||
"distinct": true,
|
||||
"message": "Add staging-argocd-demo environment",
|
||||
"timestamp": "2018-05-04T15:40:02-07:00",
|
||||
"url": "https://github.com/org/repo/commit/63738bb582c8b540af7bcfc18f87c575c3ed66e0",
|
||||
"author": {
|
||||
"name": "Jesse Suen",
|
||||
"email": "Jesse_Suen@example.com",
|
||||
"username": "org"
|
||||
},
|
||||
"committer": {
|
||||
"name": "Jesse Suen",
|
||||
"email": "Jesse_Suen@example.com",
|
||||
"username": "org"
|
||||
},
|
||||
"added": [
|
||||
"ksapps/test-app/environments/staging-argocd-demo/main.jsonnet",
|
||||
"ksapps/test-app/environments/staging-argocd-demo/params.libsonnet"
|
||||
],
|
||||
"removed": [
|
||||
|
||||
],
|
||||
"modified": [
|
||||
"ksapps/test-app/app.yaml"
|
||||
]
|
||||
},
|
||||
"repository": {
|
||||
"id": 123060978,
|
||||
"name": "repo",
|
||||
"full_name": "org/repo",
|
||||
"owner": {
|
||||
"name": "org",
|
||||
"email": "org@users.noreply.github.com",
|
||||
"login": "org",
|
||||
"id": 12677113,
|
||||
"avatar_url": "https://avatars0.githubusercontent.com/u/12677113?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/org",
|
||||
"html_url": "https://github.com/org",
|
||||
"followers_url": "https://api.github.com/users/org/followers",
|
||||
"following_url": "https://api.github.com/users/org/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/org/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/org/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/org/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/org/orgs",
|
||||
"repos_url": "https://api.github.com/users/org/repos",
|
||||
"events_url": "https://api.github.com/users/org/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/org/received_events",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
},
|
||||
"private": false,
|
||||
"html_url": "https://github.com/org/repo",
|
||||
"description": "Test Repository",
|
||||
"fork": false,
|
||||
"url": "https://github.com/org/repo",
|
||||
"forks_url": "https://api.github.com/repos/org/repo/forks",
|
||||
"keys_url": "https://api.github.com/repos/org/repo/keys{/key_id}",
|
||||
"collaborators_url": "https://api.github.com/repos/org/repo/collaborators{/collaborator}",
|
||||
"teams_url": "https://api.github.com/repos/org/repo/teams",
|
||||
"hooks_url": "https://api.github.com/repos/org/repo/hooks",
|
||||
"issue_events_url": "https://api.github.com/repos/org/repo/issues/events{/number}",
|
||||
"events_url": "https://api.github.com/repos/org/repo/events",
|
||||
"assignees_url": "https://api.github.com/repos/org/repo/assignees{/user}",
|
||||
"branches_url": "https://api.github.com/repos/org/repo/branches{/branch}",
|
||||
"tags_url": "https://api.github.com/repos/org/repo/tags",
|
||||
"blobs_url": "https://api.github.com/repos/org/repo/git/blobs{/sha}",
|
||||
"git_tags_url": "https://api.github.com/repos/org/repo/git/tags{/sha}",
|
||||
"git_refs_url": "https://api.github.com/repos/org/repo/git/refs{/sha}",
|
||||
"trees_url": "https://api.github.com/repos/org/repo/git/trees{/sha}",
|
||||
"statuses_url": "https://api.github.com/repos/org/repo/statuses/{sha}",
|
||||
"languages_url": "https://api.github.com/repos/org/repo/languages",
|
||||
"stargazers_url": "https://api.github.com/repos/org/repo/stargazers",
|
||||
"contributors_url": "https://api.github.com/repos/org/repo/contributors",
|
||||
"subscribers_url": "https://api.github.com/repos/org/repo/subscribers",
|
||||
"subscription_url": "https://api.github.com/repos/org/repo/subscription",
|
||||
"commits_url": "https://api.github.com/repos/org/repo/commits{/sha}",
|
||||
"git_commits_url": "https://api.github.com/repos/org/repo/git/commits{/sha}",
|
||||
"comments_url": "https://api.github.com/repos/org/repo/comments{/number}",
|
||||
"issue_comment_url": "https://api.github.com/repos/org/repo/issues/comments{/number}",
|
||||
"contents_url": "https://api.github.com/repos/org/repo/contents/{+path}",
|
||||
"compare_url": "https://api.github.com/repos/org/repo/compare/{base}...{head}",
|
||||
"merges_url": "https://api.github.com/repos/org/repo/merges",
|
||||
"archive_url": "https://api.github.com/repos/org/repo/{archive_format}{/ref}",
|
||||
"downloads_url": "https://api.github.com/repos/org/repo/downloads",
|
||||
"issues_url": "https://api.github.com/repos/org/repo/issues{/number}",
|
||||
"pulls_url": "https://api.github.com/repos/org/repo/pulls{/number}",
|
||||
"milestones_url": "https://api.github.com/repos/org/repo/milestones{/number}",
|
||||
"notifications_url": "https://api.github.com/repos/org/repo/notifications{?since,all,participating}",
|
||||
"labels_url": "https://api.github.com/repos/org/repo/labels{/name}",
|
||||
"releases_url": "https://api.github.com/repos/org/repo/releases{/id}",
|
||||
"deployments_url": "https://api.github.com/repos/org/repo/deployments",
|
||||
"created_at": 1519698615,
|
||||
"updated_at": "2018-05-04T22:37:55Z",
|
||||
"pushed_at": 1525473610,
|
||||
"git_url": "git://github.com/org/repo.git",
|
||||
"ssh_url": "git@github.com:org/repo.git",
|
||||
"clone_url": "https://github.com/org/repo.git",
|
||||
"svn_url": "https://github.com/org/repo",
|
||||
"homepage": null,
|
||||
"size": 538,
|
||||
"stargazers_count": 0,
|
||||
"watchers_count": 0,
|
||||
"language": null,
|
||||
"has_issues": true,
|
||||
"has_projects": true,
|
||||
"has_downloads": true,
|
||||
"has_wiki": true,
|
||||
"has_pages": false,
|
||||
"forks_count": 1,
|
||||
"mirror_url": null,
|
||||
"archived": false,
|
||||
"open_issues_count": 0,
|
||||
"license": null,
|
||||
"forks": 1,
|
||||
"open_issues": 0,
|
||||
"watchers": 0,
|
||||
"default_branch": "master",
|
||||
"stargazers": 0,
|
||||
"master_branch": "master"
|
||||
},
|
||||
"pusher": {
|
||||
"name": "org",
|
||||
"email": "org@users.noreply.github.com"
|
||||
},
|
||||
"sender": {
|
||||
"login": "org",
|
||||
"id": 12677113,
|
||||
"avatar_url": "https://avatars0.githubusercontent.com/u/12677113?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/org",
|
||||
"html_url": "https://github.com/org",
|
||||
"followers_url": "https://api.github.com/users/org/followers",
|
||||
"following_url": "https://api.github.com/users/org/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/org/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/org/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/org/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/org/orgs",
|
||||
"repos_url": "https://api.github.com/users/org/repos",
|
||||
"events_url": "https://api.github.com/users/org/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/org/received_events",
|
||||
"type": "User",
|
||||
"site_admin": false
|
||||
}
|
||||
}
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
"github.com/argoproj/argo-cd/v2/util/webhook"
|
||||
|
||||
"github.com/go-playground/webhooks/v6/azuredevops"
|
||||
"github.com/go-playground/webhooks/v6/github"
|
||||
@@ -190,11 +191,6 @@ func (h *WebhookHandler) Handler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func parseRevision(ref string) string {
|
||||
refParts := strings.SplitN(ref, "/", 3)
|
||||
return refParts[len(refParts)-1]
|
||||
}
|
||||
|
||||
func getGitGeneratorInfo(payload interface{}) *gitGeneratorInfo {
|
||||
var (
|
||||
webURL string
|
||||
@@ -204,16 +200,16 @@ func getGitGeneratorInfo(payload interface{}) *gitGeneratorInfo {
|
||||
switch payload := payload.(type) {
|
||||
case github.PushPayload:
|
||||
webURL = payload.Repository.HTMLURL
|
||||
revision = parseRevision(payload.Ref)
|
||||
revision = webhook.ParseRevision(payload.Ref)
|
||||
touchedHead = payload.Repository.DefaultBranch == revision
|
||||
case gitlab.PushEventPayload:
|
||||
webURL = payload.Project.WebURL
|
||||
revision = parseRevision(payload.Ref)
|
||||
revision = webhook.ParseRevision(payload.Ref)
|
||||
touchedHead = payload.Project.DefaultBranch == revision
|
||||
case azuredevops.GitPushEvent:
|
||||
// See: https://learn.microsoft.com/en-us/azure/devops/service-hooks/events?view=azure-devops#git.push
|
||||
webURL = payload.Resource.Repository.RemoteURL
|
||||
revision = parseRevision(payload.Resource.RefUpdates[0].Name)
|
||||
revision = webhook.ParseRevision(payload.Resource.RefUpdates[0].Name)
|
||||
touchedHead = payload.Resource.RefUpdates[0].Name == payload.Resource.Repository.DefaultBranch
|
||||
// unfortunately, Azure DevOps doesn't provide a list of changed files
|
||||
default:
|
||||
@@ -373,12 +369,12 @@ func shouldRefreshPluginGenerator(gen *v1alpha1.PluginGenerator) bool {
|
||||
}
|
||||
|
||||
func genRevisionHasChanged(gen *v1alpha1.GitGenerator, revision string, touchedHead bool) bool {
|
||||
targetRev := parseRevision(gen.Revision)
|
||||
targetRev := webhook.ParseRevision(gen.Revision)
|
||||
if targetRev == "HEAD" || targetRev == "" { // revision is head
|
||||
return touchedHead
|
||||
}
|
||||
|
||||
return targetRev == revision
|
||||
return targetRev == revision || gen.Revision == revision
|
||||
}
|
||||
|
||||
func gitGeneratorUsesURL(gen *v1alpha1.GitGenerator, webURL string, repoRegexp *regexp.Regexp) bool {
|
||||
|
||||
@@ -67,6 +67,15 @@ func TestWebhookHandler(t *testing.T) {
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedRefresh: true,
|
||||
},
|
||||
{
|
||||
desc: "WebHook from a GitHub repository via Commit shorthand",
|
||||
headerKey: "X-GitHub-Event",
|
||||
headerValue: "push",
|
||||
payloadFile: "github-commit-event-feature-branch.json",
|
||||
effectedAppSets: []string{"github-shorthand", "matrix-pull-request-github-plugin", "plugin"},
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedRefresh: true,
|
||||
},
|
||||
{
|
||||
desc: "WebHook from a GitHub repository via Commit to branch",
|
||||
headerKey: "X-GitHub-Event",
|
||||
@@ -192,6 +201,7 @@ func TestWebhookHandler(t *testing.T) {
|
||||
fakeAppWithGitGenerator("git-github", namespace, "https://github.com/org/repo"),
|
||||
fakeAppWithGitGenerator("git-gitlab", namespace, "https://gitlab/group/name"),
|
||||
fakeAppWithGitGenerator("git-azure-devops", namespace, "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git"),
|
||||
fakeAppWithGitGeneratorWithRevision("github-shorthand", namespace, "https://github.com/org/repo", "env/dev"),
|
||||
fakeAppWithGithubPullRequestGenerator("pull-request-github", namespace, "CodErTOcat", "Hello-World"),
|
||||
fakeAppWithGitlabPullRequestGenerator("pull-request-gitlab", namespace, "100500"),
|
||||
fakeAppWithAzureDevOpsPullRequestGenerator("pull-request-azure-devops", namespace, "DefaultCollection", "Fabrikam"),
|
||||
@@ -302,14 +312,62 @@ func mockGenerators() map[string]generators.Generator {
|
||||
}
|
||||
|
||||
func TestGenRevisionHasChanged(t *testing.T) {
|
||||
assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{}, "master", true))
|
||||
assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{}, "master", false))
|
||||
|
||||
assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "dev"}, "dev", true))
|
||||
assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "dev"}, "master", false))
|
||||
|
||||
assert.True(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "refs/heads/dev"}, "dev", true))
|
||||
assert.False(t, genRevisionHasChanged(&v1alpha1.GitGenerator{Revision: "refs/heads/dev"}, "master", false))
|
||||
type args struct {
|
||||
gen *v1alpha1.GitGenerator
|
||||
revision string
|
||||
touchedHead bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{name: "touchedHead", args: args{
|
||||
gen: &v1alpha1.GitGenerator{},
|
||||
revision: "main",
|
||||
touchedHead: true,
|
||||
}, want: true},
|
||||
{name: "didntTouchHead", args: args{
|
||||
gen: &v1alpha1.GitGenerator{},
|
||||
revision: "main",
|
||||
touchedHead: false,
|
||||
}, want: false},
|
||||
{name: "foundEqualShort", args: args{
|
||||
gen: &v1alpha1.GitGenerator{Revision: "dev"},
|
||||
revision: "dev",
|
||||
touchedHead: true,
|
||||
}, want: true},
|
||||
{name: "foundEqualLongGen", args: args{
|
||||
gen: &v1alpha1.GitGenerator{Revision: "refs/heads/dev"},
|
||||
revision: "dev",
|
||||
touchedHead: true,
|
||||
}, want: true},
|
||||
{name: "foundNotEqualLongGen", args: args{
|
||||
gen: &v1alpha1.GitGenerator{Revision: "refs/heads/dev"},
|
||||
revision: "main",
|
||||
touchedHead: true,
|
||||
}, want: false},
|
||||
{name: "foundNotEqualShort", args: args{
|
||||
gen: &v1alpha1.GitGenerator{Revision: "dev"},
|
||||
revision: "main",
|
||||
touchedHead: false,
|
||||
}, want: false},
|
||||
{name: "foundEqualTag", args: args{
|
||||
gen: &v1alpha1.GitGenerator{Revision: "v3.14.1"},
|
||||
revision: "v3.14.1",
|
||||
touchedHead: false,
|
||||
}, want: true},
|
||||
{name: "foundEqualTagLongGen", args: args{
|
||||
gen: &v1alpha1.GitGenerator{Revision: "refs/tags/v3.14.1"},
|
||||
revision: "v3.14.1",
|
||||
touchedHead: false,
|
||||
}, want: true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equalf(t, tt.want, genRevisionHasChanged(tt.args.gen, tt.args.revision, tt.args.touchedHead), "genRevisionHasChanged(%v, %v, %v)", tt.args.gen, tt.args.revision, tt.args.touchedHead)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func fakeAppWithGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
|
||||
@@ -331,6 +389,12 @@ func fakeAppWithGitGenerator(name, namespace, repo string) *v1alpha1.Application
|
||||
}
|
||||
}
|
||||
|
||||
func fakeAppWithGitGeneratorWithRevision(name, namespace, repo, revision string) *v1alpha1.ApplicationSet {
|
||||
appSet := fakeAppWithGitGenerator(name, namespace, repo)
|
||||
appSet.Spec.Generators[0].Git.Revision = revision
|
||||
return appSet
|
||||
}
|
||||
|
||||
func fakeAppWithGitlabPullRequestGenerator(name, namespace, projectId string) *v1alpha1.ApplicationSet {
|
||||
return &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -711,7 +775,7 @@ func fakeAppWithMatrixAndPullRequestGeneratorWithPluginGenerator(name, namespace
|
||||
func newFakeClient(ns string) *kubefake.Clientset {
|
||||
s := runtime.NewScheme()
|
||||
s.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.ApplicationSet{})
|
||||
return kubefake.NewSimpleClientset(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "argocd-cm", Namespace: ns, Labels: map[string]string{
|
||||
return kubefake.NewClientset(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "argocd-cm", Namespace: ns, Labels: map[string]string{
|
||||
"app.kubernetes.io/part-of": "argocd",
|
||||
}}}, &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
||||
193
assets/swagger.json
generated
193
assets/swagger.json
generated
@@ -1990,6 +1990,39 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/generate": {
|
||||
"post": {
|
||||
"tags": [
|
||||
"ApplicationSetService"
|
||||
],
|
||||
"summary": "Generate generates",
|
||||
"operationId": "ApplicationSetService_Generate",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationsetApplicationSetGenerateRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationsetApplicationSetGenerateResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/{name}": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -4489,6 +4522,27 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationsetApplicationSetGenerateRequest": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSetGetQuery is a query for applicationset resources",
|
||||
"properties": {
|
||||
"applicationSet": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSet"
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationsetApplicationSetGenerateResponse": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSetGenerateResponse is a response for applicationset generate request",
|
||||
"properties": {
|
||||
"applications": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1Application"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationsetApplicationSetResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -4514,6 +4568,43 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationv1alpha1ResourceStatus": {
|
||||
"type": "object",
|
||||
"title": "ResourceStatus holds the current sync and health status of a resource\nTODO: describe members of this type",
|
||||
"properties": {
|
||||
"group": {
|
||||
"type": "string"
|
||||
},
|
||||
"health": {
|
||||
"$ref": "#/definitions/v1alpha1HealthStatus"
|
||||
},
|
||||
"hook": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"kind": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
},
|
||||
"requiresPruning": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"status": {
|
||||
"type": "string"
|
||||
},
|
||||
"syncWave": {
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"version": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterClusterID": {
|
||||
"type": "object",
|
||||
"title": "ClusterID holds a cluster server URL or cluster name",
|
||||
@@ -4658,6 +4749,12 @@
|
||||
"help": {
|
||||
"$ref": "#/definitions/clusterHelp"
|
||||
},
|
||||
"impersonationEnabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"installationID": {
|
||||
"type": "string"
|
||||
},
|
||||
"kustomizeOptions": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeOptions"
|
||||
},
|
||||
@@ -5464,7 +5561,7 @@
|
||||
"properties": {
|
||||
"matchExpressions": {
|
||||
"type": "array",
|
||||
"title": "matchExpressions is a list of label selector requirements. The requirements are ANDed.\n+optional",
|
||||
"title": "matchExpressions is a list of label selector requirements. The requirements are ANDed.\n+optional\n+listType=atomic",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1LabelSelectorRequirement"
|
||||
}
|
||||
@@ -5492,7 +5589,7 @@
|
||||
},
|
||||
"values": {
|
||||
"type": "array",
|
||||
"title": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.\n+optional",
|
||||
"title": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.\n+optional\n+listType=atomic",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -5616,7 +5713,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"kubeProxyVersion": {
|
||||
"description": "KubeProxy Version reported by the node.",
|
||||
"description": "Deprecated: KubeProxy Version reported by the node.",
|
||||
"type": "string"
|
||||
},
|
||||
"kubeletVersion": {
|
||||
@@ -5665,7 +5762,7 @@
|
||||
},
|
||||
"finalizers": {
|
||||
"type": "array",
|
||||
"title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\nFinalizers may be processed and removed in any order. Order is NOT enforced\nbecause it introduces significant risk of stuck finalizers.\nfinalizers is a shared field, any actor with permission can reorder it.\nIf the finalizer list is processed in order, then this can lead to a situation\nin which the component responsible for the first finalizer in the list is\nwaiting for a signal (field value, external system, or other) produced by a\ncomponent responsible for a finalizer later in the list, resulting in a deadlock.\nWithout enforced ordering finalizers are free to order amongst themselves and\nare not vulnerable to ordering changes in the list.\n+optional\n+patchStrategy=merge",
|
||||
"title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\nFinalizers may be processed and removed in any order. Order is NOT enforced\nbecause it introduces significant risk of stuck finalizers.\nfinalizers is a shared field, any actor with permission can reorder it.\nIf the finalizer list is processed in order, then this can lead to a situation\nin which the component responsible for the first finalizer in the list is\nwaiting for a signal (field value, external system, or other) produced by a\ncomponent responsible for a finalizer later in the list, resulting in a deadlock.\nWithout enforced ordering finalizers are free to order amongst themselves and\nare not vulnerable to ordering changes in the list.\n+optional\n+patchStrategy=merge\n+listType=set",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -5687,7 +5784,7 @@
|
||||
}
|
||||
},
|
||||
"managedFields": {
|
||||
"description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\n+optional",
|
||||
"description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\n+optional\n+listType=atomic",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1ManagedFieldsEntry"
|
||||
@@ -5703,7 +5800,7 @@
|
||||
},
|
||||
"ownerReferences": {
|
||||
"type": "array",
|
||||
"title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge",
|
||||
"title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge\n+listType=map\n+listMapKey=uid",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1OwnerReference"
|
||||
}
|
||||
@@ -5879,6 +5976,13 @@
|
||||
"type": "string",
|
||||
"title": "Description contains optional project description"
|
||||
},
|
||||
"destinationServiceAccounts": {
|
||||
"description": "DestinationServiceAccounts holds information about the service accounts to be impersonated for the application sync operation for each destination.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationDestinationServiceAccount"
|
||||
}
|
||||
},
|
||||
"destinations": {
|
||||
"type": "array",
|
||||
"title": "Destinations contains list of destinations available for deployment",
|
||||
@@ -6010,6 +6114,24 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationDestinationServiceAccount": {
|
||||
"description": "ApplicationDestinationServiceAccount holds information about the service account to be impersonated for the application sync operation.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"defaultServiceAccount": {
|
||||
"type": "string",
|
||||
"title": "DefaultServiceAccount to be used for impersonation during the sync operation"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace specifies the target namespace for the application's resources.",
|
||||
"type": "string"
|
||||
},
|
||||
"server": {
|
||||
"description": "Server specifies the URL of the target cluster's Kubernetes control plane API.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationList": {
|
||||
"type": "object",
|
||||
"title": "ApplicationList is list of Application resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
|
||||
@@ -6334,7 +6456,7 @@
|
||||
"description": "Resources is a list of Applications resources managed by this application set.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ResourceStatus"
|
||||
"$ref": "#/definitions/applicationv1alpha1ResourceStatus"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6801,7 +6923,7 @@
|
||||
"type": "array",
|
||||
"title": "Resources is a list of Kubernetes resources managed by this application",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ResourceStatus"
|
||||
"$ref": "#/definitions/applicationv1alpha1ResourceStatus"
|
||||
}
|
||||
},
|
||||
"sourceType": {
|
||||
@@ -6867,6 +6989,11 @@
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ResourceNode"
|
||||
}
|
||||
},
|
||||
"shardsCount": {
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "ShardsCount contains total number of shards the application tree is split into"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -8179,6 +8306,10 @@
|
||||
"type": "string",
|
||||
"title": "GithubAppPrivateKey specifies the private key PEM data for authentication via GitHub app"
|
||||
},
|
||||
"noProxy": {
|
||||
"type": "string",
|
||||
"title": "NoProxy specifies a list of targets where the proxy isn't used, applies only in cases where the proxy is applied"
|
||||
},
|
||||
"password": {
|
||||
"type": "string",
|
||||
"title": "Password for authenticating at the repo server"
|
||||
@@ -8285,6 +8416,10 @@
|
||||
"type": "string",
|
||||
"title": "Name specifies a name to be used for this repo. Only used with Helm repos"
|
||||
},
|
||||
"noProxy": {
|
||||
"type": "string",
|
||||
"title": "NoProxy specifies a list of targets where the proxy isn't used, applies only in cases where the proxy is applied"
|
||||
},
|
||||
"password": {
|
||||
"type": "string",
|
||||
"title": "Password contains the password or PAT used for authenticating at the remote repository"
|
||||
@@ -8682,43 +8817,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ResourceStatus": {
|
||||
"type": "object",
|
||||
"title": "ResourceStatus holds the current sync and health status of a resource\nTODO: describe members of this type",
|
||||
"properties": {
|
||||
"group": {
|
||||
"type": "string"
|
||||
},
|
||||
"health": {
|
||||
"$ref": "#/definitions/v1alpha1HealthStatus"
|
||||
},
|
||||
"hook": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"kind": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
},
|
||||
"requiresPruning": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"status": {
|
||||
"type": "string"
|
||||
},
|
||||
"syncWave": {
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"version": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1RetryStrategy": {
|
||||
"type": "object",
|
||||
"title": "RetryStrategy contains information about the strategy to apply when a sync failed",
|
||||
@@ -9105,6 +9203,11 @@
|
||||
"description": "SyncOperation contains details about a sync operation.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"autoHealAttemptsCount": {
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "SelfHealAttemptsCount contains the number of auto-heal attempts"
|
||||
},
|
||||
"dryRun": {
|
||||
"type": "boolean",
|
||||
"title": "DryRun specifies to perform a `kubectl apply --dry-run` without actually performing the sync"
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/ratelimiter"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
@@ -56,12 +58,16 @@ func NewCommand() *cobra.Command {
|
||||
repoServerAddress string
|
||||
repoServerTimeoutSeconds int
|
||||
selfHealTimeoutSeconds int
|
||||
selfHealBackoffTimeoutSeconds int
|
||||
selfHealBackoffFactor int
|
||||
selfHealBackoffCapSeconds int
|
||||
statusProcessors int
|
||||
operationProcessors int
|
||||
glogLevel int
|
||||
metricsPort int
|
||||
metricsCacheExpiration time.Duration
|
||||
metricsAplicationLabels []string
|
||||
metricsAplicationConditions []string
|
||||
kubectlParallelismLimit int64
|
||||
cacheSource func() (*appstatecache.Cache, error)
|
||||
redisClient *redis.Client
|
||||
@@ -77,6 +83,9 @@ func NewCommand() *cobra.Command {
|
||||
enableDynamicClusterDistribution bool
|
||||
serverSideDiff bool
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
|
||||
|
||||
// argocd k8s event logging flag
|
||||
enableK8sEvent []string
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -151,6 +160,14 @@ func NewCommand() *cobra.Command {
|
||||
kubectl := kubeutil.NewKubectl()
|
||||
clusterSharding, err := sharding.GetClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
|
||||
errors.CheckError(err)
|
||||
var selfHealBackoff *wait.Backoff
|
||||
if selfHealBackoffTimeoutSeconds != 0 {
|
||||
selfHealBackoff = &wait.Backoff{
|
||||
Duration: time.Duration(selfHealBackoffTimeoutSeconds) * time.Second,
|
||||
Factor: float64(selfHealBackoffFactor),
|
||||
Cap: time.Duration(selfHealBackoffCapSeconds) * time.Second,
|
||||
}
|
||||
}
|
||||
appController, err = controller.NewApplicationController(
|
||||
namespace,
|
||||
settingsMgr,
|
||||
@@ -163,10 +180,12 @@ func NewCommand() *cobra.Command {
|
||||
hardResyncDuration,
|
||||
time.Duration(appResyncJitter)*time.Second,
|
||||
time.Duration(selfHealTimeoutSeconds)*time.Second,
|
||||
selfHealBackoff,
|
||||
time.Duration(repoErrorGracePeriod)*time.Second,
|
||||
metricsPort,
|
||||
metricsCacheExpiration,
|
||||
metricsAplicationLabels,
|
||||
metricsAplicationConditions,
|
||||
kubectlParallelismLimit,
|
||||
persistResourceHealth,
|
||||
clusterSharding,
|
||||
@@ -175,6 +194,7 @@ func NewCommand() *cobra.Command {
|
||||
serverSideDiff,
|
||||
enableDynamicClusterDistribution,
|
||||
ignoreNormalizerOpts,
|
||||
enableK8sEvent,
|
||||
)
|
||||
errors.CheckError(err)
|
||||
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
|
||||
@@ -224,11 +244,15 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
|
||||
command.Flags().DurationVar(&metricsCacheExpiration, "metrics-cache-expiration", env.ParseDurationFromEnv("ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION", 0*time.Second, 0, math.MaxInt64), "Prometheus metrics cache expiration (disabled by default. e.g. 24h0m0s)")
|
||||
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 5, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
|
||||
command.Flags().IntVar(&selfHealTimeoutSeconds, "self-heal-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS", 0, 0, math.MaxInt32), "Specifies timeout between application self heal attempts")
|
||||
command.Flags().IntVar(&selfHealBackoffTimeoutSeconds, "self-heal-backoff-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_TIMEOUT_SECONDS", 2, 0, math.MaxInt32), "Specifies initial timeout of exponential backoff between self heal attempts")
|
||||
command.Flags().IntVar(&selfHealBackoffFactor, "self-heal-backoff-factor", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_FACTOR", 3, 0, math.MaxInt32), "Specifies factor of exponential timeout between application self heal attempts")
|
||||
command.Flags().IntVar(&selfHealBackoffCapSeconds, "self-heal-backoff-cap-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_CAP_SECONDS", 300, 0, math.MaxInt32), "Specifies max timeout of exponential backoff between application self heal attempts")
|
||||
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
|
||||
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
|
||||
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
|
||||
command.Flags().StringSliceVar(&metricsAplicationLabels, "metrics-application-labels", []string{}, "List of Application labels that will be added to the argocd_application_labels metric")
|
||||
command.Flags().StringSliceVar(&metricsAplicationConditions, "metrics-application-conditions", []string{}, "List of Application conditions that will be added to the argocd_application_conditions metric")
|
||||
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
|
||||
command.Flags().BoolVar(&otlpInsecure, "otlp-insecure", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_INSECURE", true), "OpenTelemetry collector insecure mode")
|
||||
command.Flags().StringToStringVar(&otlpHeaders, "otlp-headers", env.ParseStringToStringFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_HEADERS", map[string]string{}, ","), "List of OpenTelemetry collector extra headers sent with traces, headers are comma-separated key-value pairs(e.g. key1=value1,key2=value2)")
|
||||
@@ -248,6 +272,9 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
|
||||
command.Flags().BoolVar(&serverSideDiff, "server-side-diff-enabled", env.ParseBoolFromEnv(common.EnvServerSideDiff, false), "Feature flag to enable ServerSide diff. Default (\"false\")")
|
||||
command.Flags().DurationVar(&ignoreNormalizerOpts.JQExecutionTimeout, "ignore-normalizer-jq-execution-timeout-seconds", env.ParseDurationFromEnv("ARGOCD_IGNORE_NORMALIZER_JQ_TIMEOUT", 0*time.Second, 0, math.MaxInt64), "Set ignore normalizer JQ execution timeout")
|
||||
// argocd k8s event logging flag
|
||||
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
|
||||
|
||||
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, cacheutil.Options{
|
||||
OnClientCreated: func(client *redis.Client) {
|
||||
redisClient = client
|
||||
|
||||
@@ -35,6 +35,7 @@ import (
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
appsetmetrics "github.com/argoproj/argo-cd/v2/applicationset/metrics"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services"
|
||||
appv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
@@ -69,6 +70,7 @@ func NewCommand() *cobra.Command {
|
||||
allowedScmProviders []string
|
||||
globalPreservedAnnotations []string
|
||||
globalPreservedLabels []string
|
||||
metricsAplicationsetLabels []string
|
||||
enableScmProviders bool
|
||||
webhookParallelism int
|
||||
)
|
||||
@@ -167,7 +169,7 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
tlsConfig := apiclient.TLSConfiguration{
|
||||
DisableTLS: repoServerPlaintext,
|
||||
StrictValidation: repoServerPlaintext,
|
||||
StrictValidation: repoServerStrictTLS,
|
||||
}
|
||||
|
||||
if !repoServerPlaintext && repoServerStrictTLS {
|
||||
@@ -194,6 +196,13 @@ func NewCommand() *cobra.Command {
|
||||
startWebhookServer(webhookHandler, webhookAddr)
|
||||
}
|
||||
|
||||
metrics := appsetmetrics.NewApplicationsetMetrics(
|
||||
utils.NewAppsetLister(mgr.GetClient()),
|
||||
metricsAplicationsetLabels,
|
||||
func(appset *appv1alpha1.ApplicationSet) bool {
|
||||
return utils.IsNamespaceAllowed(applicationSetNamespaces, appset.Namespace)
|
||||
})
|
||||
|
||||
if err = (&controllers.ApplicationSetReconciler{
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
@@ -211,7 +220,7 @@ func NewCommand() *cobra.Command {
|
||||
SCMRootCAPath: scmRootCAPath,
|
||||
GlobalPreservedAnnotations: globalPreservedAnnotations,
|
||||
GlobalPreservedLabels: globalPreservedLabels,
|
||||
Cache: mgr.GetCache(),
|
||||
Metrics: &metrics,
|
||||
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
|
||||
os.Exit(1)
|
||||
@@ -253,6 +262,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringSliceVar(&globalPreservedAnnotations, "preserved-annotations", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS", []string{}, ","), "Sets global preserved field values for annotations")
|
||||
command.Flags().StringSliceVar(&globalPreservedLabels, "preserved-labels", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS", []string{}, ","), "Sets global preserved field values for labels")
|
||||
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
|
||||
command.Flags().StringSliceVar(&metricsAplicationsetLabels, "metrics-applicationset-labels", []string{}, "List of Application labels that will be added to the argocd_applicationset_labels metric")
|
||||
return &command
|
||||
}
|
||||
|
||||
@@ -260,7 +270,7 @@ func startWebhookServer(webhookHandler *webhook.WebhookHandler, webhookAddr stri
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/api/webhook", webhookHandler.Handler)
|
||||
go func() {
|
||||
log.Info("Starting webhook server")
|
||||
log.Infof("Starting webhook server %s", webhookAddr)
|
||||
err := http.ListenAndServe(webhookAddr, mux)
|
||||
if err != nil {
|
||||
log.Error(err, "failed to start webhook server")
|
||||
|
||||
@@ -136,8 +136,8 @@ func NewRunDexCommand() *cobra.Command {
|
||||
}
|
||||
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(&command)
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_DEX_SERVER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_DEX_SERVER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_DEX_SERVER_DISABLE_TLS", false), "Disable TLS on the HTTP endpoint")
|
||||
return &command
|
||||
}
|
||||
@@ -204,8 +204,8 @@ func NewGenDexConfigCommand() *cobra.Command {
|
||||
}
|
||||
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(&command)
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_DEX_SERVER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_DEX_SERVER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringVarP(&out, "out", "o", "", "Output to the specified file instead of stdout")
|
||||
command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_DEX_SERVER_DISABLE_TLS", false), "Disable TLS on the HTTP endpoint")
|
||||
return &command
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
@@ -62,7 +66,8 @@ func NewCommand() *cobra.Command {
|
||||
Use: "controller",
|
||||
Short: "Starts Argo CD Notifications controller",
|
||||
RunE: func(c *cobra.Command, args []string) error {
|
||||
ctx := c.Context()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
vers := common.GetVersion()
|
||||
namespace, _, err := clientConfig.Namespace()
|
||||
@@ -146,6 +151,17 @@ func NewCommand() *cobra.Command {
|
||||
return fmt.Errorf("failed to initialize controller: %w", err)
|
||||
}
|
||||
|
||||
sigCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
s := <-sigCh
|
||||
log.Printf("got signal %v, attempting graceful shutdown", s)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
go ctrl.Run(ctx, processorsCount)
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
@@ -159,7 +175,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&logFormat, "logformat", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", defaultMetricsPort, "Metrics port")
|
||||
command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", common.DefaultRepoServerAddr, "Argo CD repo server address")
|
||||
command.Flags().BoolVar(&argocdRepoServerPlaintext, "argocd-repo-server-plaintext", false, "Use a plaintext client (non-TLS) to connect to repository server")
|
||||
command.Flags().BoolVar(&argocdRepoServerPlaintext, "argocd-repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_NOTIFICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Use a plaintext client (non-TLS) to connect to repository server")
|
||||
command.Flags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server")
|
||||
command.Flags().StringVar(&configMapName, "config-map-name", "argocd-notifications-cm", "Set notifications ConfigMap name")
|
||||
command.Flags().StringVar(&secretName, "secret-name", "argocd-notifications-secret", "Set notifications Secret name")
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -25,6 +27,7 @@ import (
|
||||
reposervercache "github.com/argoproj/argo-cd/v2/reposerver/cache"
|
||||
"github.com/argoproj/argo-cd/v2/server"
|
||||
servercache "github.com/argoproj/argo-cd/v2/server/cache"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/dex"
|
||||
@@ -89,6 +92,9 @@ func NewCommand() *cobra.Command {
|
||||
scmRootCAPath string
|
||||
allowedScmProviders []string
|
||||
enableScmProviders bool
|
||||
|
||||
// argocd k8s event logging flag
|
||||
enableK8sEvent []string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -142,9 +148,14 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
dynamicClient := dynamic.NewForConfigOrDie(config)
|
||||
|
||||
controllerClient, err := client.New(config, client.Options{})
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
_ = v1alpha1.AddToScheme(scheme)
|
||||
|
||||
controllerClient, err := client.New(config, client.Options{Scheme: scheme})
|
||||
errors.CheckError(err)
|
||||
controllerClient = client.NewDryRunClient(controllerClient)
|
||||
controllerClient = client.NewNamespacedClient(controllerClient, namespace)
|
||||
|
||||
// Load CA information to use for validating connections to the
|
||||
// repository server, if strict TLS validation was requested.
|
||||
@@ -223,6 +234,7 @@ func NewCommand() *cobra.Command {
|
||||
ApplicationNamespaces: applicationNamespaces,
|
||||
EnableProxyExtension: enableProxyExtension,
|
||||
WebhookParallelism: webhookParallelism,
|
||||
EnableK8sEvent: enableK8sEvent,
|
||||
}
|
||||
|
||||
appsetOpts := server.ApplicationSetOpts{
|
||||
@@ -297,6 +309,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
|
||||
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
|
||||
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_SERVER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
|
||||
command.Flags().StringSliceVar(&enableK8sEvent, "enable-k8s-event", env.StringsFromEnv("ARGOCD_ENABLE_K8S_EVENT", argo.DefaultEnableEventList(), ","), "Enable ArgoCD to use k8s event. For disabling all events, set the value as `none`. (e.g --enable-k8s-event=none), For enabling specific events, set the value as `event reason`. (e.g --enable-k8s-event=StatusRefreshed,ResourceCreated)")
|
||||
|
||||
// Flags related to the applicationSet component.
|
||||
command.Flags().StringVar(&scmRootCAPath, "appset-scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
@@ -83,11 +86,12 @@ func newArgoCDClientsets(config *rest.Config, namespace string) *argoCDClientset
|
||||
dynamicIf, err := dynamic.NewForConfig(config)
|
||||
errors.CheckError(err)
|
||||
return &argoCDClientsets{
|
||||
configMaps: dynamicIf.Resource(configMapResource).Namespace(namespace),
|
||||
secrets: dynamicIf.Resource(secretResource).Namespace(namespace),
|
||||
applications: dynamicIf.Resource(applicationsResource).Namespace(namespace),
|
||||
configMaps: dynamicIf.Resource(configMapResource).Namespace(namespace),
|
||||
secrets: dynamicIf.Resource(secretResource).Namespace(namespace),
|
||||
// To support applications and applicationsets in any namespace we will watch all namespaces and filter them afterwards
|
||||
applications: dynamicIf.Resource(applicationsResource),
|
||||
projects: dynamicIf.Resource(appprojectsResource).Namespace(namespace),
|
||||
applicationSets: dynamicIf.Resource(appplicationSetResource).Namespace(namespace),
|
||||
applicationSets: dynamicIf.Resource(appplicationSetResource),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,7 +190,11 @@ func isArgoCDConfigMap(name string) bool {
|
||||
// specsEqual returns if the spec, data, labels, annotations, and finalizers of the two
|
||||
// supplied objects are equal, indicating that no update is necessary during importing
|
||||
func specsEqual(left, right unstructured.Unstructured) bool {
|
||||
if !reflect.DeepEqual(left.GetAnnotations(), right.GetAnnotations()) {
|
||||
leftAnnotation := left.GetAnnotations()
|
||||
rightAnnotation := right.GetAnnotations()
|
||||
delete(leftAnnotation, apiv1.LastAppliedConfigAnnotation)
|
||||
delete(rightAnnotation, apiv1.LastAppliedConfigAnnotation)
|
||||
if !reflect.DeepEqual(leftAnnotation, rightAnnotation) {
|
||||
return false
|
||||
}
|
||||
if !reflect.DeepEqual(left.GetLabels(), right.GetLabels()) {
|
||||
@@ -218,3 +226,52 @@ func specsEqual(left, right unstructured.Unstructured) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type argocdAdditonalNamespaces struct {
|
||||
applicationNamespaces []string
|
||||
applicationsetNamespaces []string
|
||||
}
|
||||
|
||||
const (
|
||||
applicationsetNamespacesCmdParamsKey = "applicationsetcontroller.namespaces"
|
||||
applicationNamespacesCmdParamsKey = "application.namespaces"
|
||||
)
|
||||
|
||||
// Get additional namespaces from argocd-cmd-params
|
||||
func getAdditionalNamespaces(ctx context.Context, argocdClientsets *argoCDClientsets) *argocdAdditonalNamespaces {
|
||||
applicationNamespaces := make([]string, 0)
|
||||
applicationsetNamespaces := make([]string, 0)
|
||||
|
||||
un, err := argocdClientsets.configMaps.Get(ctx, common.ArgoCDCmdParamsConfigMapName, v1.GetOptions{})
|
||||
errors.CheckError(err)
|
||||
var cm apiv1.ConfigMap
|
||||
err = runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &cm)
|
||||
errors.CheckError(err)
|
||||
|
||||
namespacesListFromString := func(namespaces string) []string {
|
||||
listOfNamespaces := []string{}
|
||||
|
||||
ss := strings.Split(namespaces, ",")
|
||||
|
||||
for _, namespace := range ss {
|
||||
if namespace != "" {
|
||||
listOfNamespaces = append(listOfNamespaces, strings.TrimSpace(namespace))
|
||||
}
|
||||
}
|
||||
|
||||
return listOfNamespaces
|
||||
}
|
||||
|
||||
if strNamespaces, ok := cm.Data[applicationNamespacesCmdParamsKey]; ok {
|
||||
applicationNamespaces = namespacesListFromString(strNamespaces)
|
||||
}
|
||||
|
||||
if strNamespaces, ok := cm.Data[applicationsetNamespacesCmdParamsKey]; ok {
|
||||
applicationsetNamespaces = namespacesListFromString(strNamespaces)
|
||||
}
|
||||
|
||||
return &argocdAdditonalNamespaces{
|
||||
applicationNamespaces: applicationNamespaces,
|
||||
applicationsetNamespaces: applicationsetNamespaces,
|
||||
}
|
||||
}
|
||||
|
||||
75
cmd/argocd/commands/admin/admin_test.go
Normal file
75
cmd/argocd/commands/admin/admin_test.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
)
|
||||
|
||||
func TestGetAdditionalNamespaces(t *testing.T) {
|
||||
createArgoCDCmdCMWithKeys := func(data map[string]interface{}) *unstructured.Unstructured {
|
||||
return &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ConfigMap",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "argocd-cmd-params-cm",
|
||||
"namespace": "argocd",
|
||||
},
|
||||
"data": data,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
CmdParamsKeys map[string]interface{}
|
||||
expected argocdAdditonalNamespaces
|
||||
description string
|
||||
}{
|
||||
{
|
||||
description: "empty configmap should return no additional namespaces",
|
||||
CmdParamsKeys: map[string]interface{}{},
|
||||
expected: argocdAdditonalNamespaces{applicationNamespaces: []string{}, applicationsetNamespaces: []string{}},
|
||||
},
|
||||
{
|
||||
description: "empty strings in respective keys in cm shoud return empty namespace list",
|
||||
CmdParamsKeys: map[string]interface{}{applicationsetNamespacesCmdParamsKey: "", applicationNamespacesCmdParamsKey: ""},
|
||||
expected: argocdAdditonalNamespaces{applicationNamespaces: []string{}, applicationsetNamespaces: []string{}},
|
||||
},
|
||||
{
|
||||
description: "when only one of the keys in the cm is set only correct respective list of namespaces should be returned",
|
||||
CmdParamsKeys: map[string]interface{}{applicationNamespacesCmdParamsKey: "foo, bar*"},
|
||||
expected: argocdAdditonalNamespaces{applicationsetNamespaces: []string{}, applicationNamespaces: []string{"foo", "bar*"}},
|
||||
},
|
||||
{
|
||||
description: "when only one of the keys in the cm is set only correct respective list of namespaces should be returned",
|
||||
CmdParamsKeys: map[string]interface{}{applicationsetNamespacesCmdParamsKey: "foo, bar*"},
|
||||
expected: argocdAdditonalNamespaces{applicationNamespaces: []string{}, applicationsetNamespaces: []string{"foo", "bar*"}},
|
||||
},
|
||||
{
|
||||
description: "whitespaces are removed for both multiple and single namespace",
|
||||
CmdParamsKeys: map[string]interface{}{applicationNamespacesCmdParamsKey: " bar ", applicationsetNamespacesCmdParamsKey: " foo , bar* "},
|
||||
expected: argocdAdditonalNamespaces{applicationNamespaces: []string{"bar"}, applicationsetNamespaces: []string{"foo", "bar*"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range testCases {
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClient(runtime.NewScheme(), createArgoCDCmdCMWithKeys(c.CmdParamsKeys))
|
||||
|
||||
argoCDClientsets := &argoCDClientsets{
|
||||
configMaps: fakeDynClient.Resource(configMapResource).Namespace("argocd"),
|
||||
applications: fakeDynClient.Resource(schema.GroupVersionResource{}),
|
||||
applicationSets: fakeDynClient.Resource(schema.GroupVersionResource{}),
|
||||
secrets: fakeDynClient.Resource(schema.GroupVersionResource{}),
|
||||
projects: fakeDynClient.Resource(schema.GroupVersionResource{}),
|
||||
}
|
||||
|
||||
result := getAdditionalNamespaces(context.TODO(), argoCDClientsets)
|
||||
assert.Equal(t, c.expected, *result)
|
||||
}
|
||||
}
|
||||
@@ -387,7 +387,7 @@ func reconcileApplications(
|
||||
return true
|
||||
}, func(r *http.Request) error {
|
||||
return nil
|
||||
}, []string{})
|
||||
}, []string{}, []string{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -20,13 +20,16 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
secutil "github.com/argoproj/argo-cd/v2/util/security"
|
||||
)
|
||||
|
||||
// NewExportCommand defines a new command for exporting Kubernetes and Argo CD resources.
|
||||
func NewExportCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
out string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
out string
|
||||
applicationNamespaces []string
|
||||
applicationsetNamespaces []string
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: "export",
|
||||
@@ -58,34 +61,47 @@ func NewExportCommand() *cobra.Command {
|
||||
acdClients := newArgoCDClientsets(config, namespace)
|
||||
acdConfigMap, err := acdClients.configMaps.Get(ctx, common.ArgoCDConfigMapName, v1.GetOptions{})
|
||||
errors.CheckError(err)
|
||||
export(writer, *acdConfigMap)
|
||||
export(writer, *acdConfigMap, namespace)
|
||||
acdRBACConfigMap, err := acdClients.configMaps.Get(ctx, common.ArgoCDRBACConfigMapName, v1.GetOptions{})
|
||||
errors.CheckError(err)
|
||||
export(writer, *acdRBACConfigMap)
|
||||
export(writer, *acdRBACConfigMap, namespace)
|
||||
acdKnownHostsConfigMap, err := acdClients.configMaps.Get(ctx, common.ArgoCDKnownHostsConfigMapName, v1.GetOptions{})
|
||||
errors.CheckError(err)
|
||||
export(writer, *acdKnownHostsConfigMap)
|
||||
export(writer, *acdKnownHostsConfigMap, namespace)
|
||||
acdTLSCertsConfigMap, err := acdClients.configMaps.Get(ctx, common.ArgoCDTLSCertsConfigMapName, v1.GetOptions{})
|
||||
errors.CheckError(err)
|
||||
export(writer, *acdTLSCertsConfigMap)
|
||||
export(writer, *acdTLSCertsConfigMap, namespace)
|
||||
|
||||
referencedSecrets := getReferencedSecrets(*acdConfigMap)
|
||||
secrets, err := acdClients.secrets.List(ctx, v1.ListOptions{})
|
||||
errors.CheckError(err)
|
||||
for _, secret := range secrets.Items {
|
||||
if isArgoCDSecret(referencedSecrets, secret) {
|
||||
export(writer, secret)
|
||||
export(writer, secret, namespace)
|
||||
}
|
||||
}
|
||||
projects, err := acdClients.projects.List(ctx, v1.ListOptions{})
|
||||
errors.CheckError(err)
|
||||
for _, proj := range projects.Items {
|
||||
export(writer, proj)
|
||||
export(writer, proj, namespace)
|
||||
}
|
||||
|
||||
additionalNamespaces := getAdditionalNamespaces(ctx, acdClients)
|
||||
|
||||
if len(applicationNamespaces) == 0 {
|
||||
applicationNamespaces = additionalNamespaces.applicationNamespaces
|
||||
}
|
||||
if len(applicationsetNamespaces) == 0 {
|
||||
applicationsetNamespaces = additionalNamespaces.applicationsetNamespaces
|
||||
}
|
||||
|
||||
applications, err := acdClients.applications.List(ctx, v1.ListOptions{})
|
||||
errors.CheckError(err)
|
||||
for _, app := range applications.Items {
|
||||
export(writer, app)
|
||||
// Export application only if it is in one of the enabled namespaces
|
||||
if secutil.IsNamespaceEnabled(app.GetNamespace(), namespace, applicationNamespaces) {
|
||||
export(writer, app, namespace)
|
||||
}
|
||||
}
|
||||
applicationSets, err := acdClients.applicationSets.List(ctx, v1.ListOptions{})
|
||||
if err != nil && !apierr.IsNotFound(err) {
|
||||
@@ -97,7 +113,9 @@ func NewExportCommand() *cobra.Command {
|
||||
}
|
||||
if applicationSets != nil {
|
||||
for _, appSet := range applicationSets.Items {
|
||||
export(writer, appSet)
|
||||
if secutil.IsNamespaceEnabled(appSet.GetNamespace(), namespace, applicationsetNamespaces) {
|
||||
export(writer, appSet, namespace)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -105,18 +123,22 @@ func NewExportCommand() *cobra.Command {
|
||||
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(&command)
|
||||
command.Flags().StringVarP(&out, "out", "o", "-", "Output to the specified file instead of stdout")
|
||||
|
||||
command.Flags().StringSliceVarP(&applicationNamespaces, "application-namespaces", "", []string{}, fmt.Sprintf("Comma separated list of namespace globs to export applications from. If not provided value from '%s' in %s will be used,if it's not defined only applications from Argo CD namespace will be exported", applicationNamespacesCmdParamsKey, common.ArgoCDCmdParamsConfigMapName))
|
||||
command.Flags().StringSliceVarP(&applicationsetNamespaces, "applicationset-namespaces", "", []string{}, fmt.Sprintf("Comma separated list of namespace globs to export applicationsets from. If not provided value from '%s' in %s will be used,if it's not defined only applicationsets from Argo CD namespace will be exported", applicationsetNamespacesCmdParamsKey, common.ArgoCDCmdParamsConfigMapName))
|
||||
return &command
|
||||
}
|
||||
|
||||
// NewImportCommand defines a new command for exporting Kubernetes and Argo CD resources.
|
||||
func NewImportCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
prune bool
|
||||
dryRun bool
|
||||
verbose bool
|
||||
stopOperation bool
|
||||
clientConfig clientcmd.ClientConfig
|
||||
prune bool
|
||||
dryRun bool
|
||||
verbose bool
|
||||
stopOperation bool
|
||||
ignoreTracking bool
|
||||
applicationNamespaces []string
|
||||
applicationsetNamespaces []string
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: "import SOURCE",
|
||||
@@ -135,6 +157,8 @@ func NewImportCommand() *cobra.Command {
|
||||
namespace, _, err := clientConfig.Namespace()
|
||||
errors.CheckError(err)
|
||||
acdClients := newArgoCDClientsets(config, namespace)
|
||||
client, err := dynamic.NewForConfig(config)
|
||||
errors.CheckError(err)
|
||||
|
||||
var input []byte
|
||||
if in := args[0]; in == "-" {
|
||||
@@ -148,6 +172,15 @@ func NewImportCommand() *cobra.Command {
|
||||
dryRunMsg = " (dry run)"
|
||||
}
|
||||
|
||||
additionalNamespaces := getAdditionalNamespaces(ctx, acdClients)
|
||||
|
||||
if len(applicationNamespaces) == 0 {
|
||||
applicationNamespaces = additionalNamespaces.applicationNamespaces
|
||||
}
|
||||
if len(applicationsetNamespaces) == 0 {
|
||||
applicationsetNamespaces = additionalNamespaces.applicationsetNamespaces
|
||||
}
|
||||
|
||||
// pruneObjects tracks live objects and it's current resource version. any remaining
|
||||
// items in this map indicates the resource should be pruned since it no longer appears
|
||||
// in the backup
|
||||
@@ -159,7 +192,7 @@ func NewImportCommand() *cobra.Command {
|
||||
var referencedSecrets map[string]bool
|
||||
for _, cm := range configMaps.Items {
|
||||
if isArgoCDConfigMap(cm.GetName()) {
|
||||
pruneObjects[kube.ResourceKey{Group: "", Kind: "ConfigMap", Name: cm.GetName()}] = cm
|
||||
pruneObjects[kube.ResourceKey{Group: "", Kind: "ConfigMap", Name: cm.GetName(), Namespace: cm.GetNamespace()}] = cm
|
||||
}
|
||||
if cm.GetName() == common.ArgoCDConfigMapName {
|
||||
referencedSecrets = getReferencedSecrets(cm)
|
||||
@@ -170,18 +203,20 @@ func NewImportCommand() *cobra.Command {
|
||||
errors.CheckError(err)
|
||||
for _, secret := range secrets.Items {
|
||||
if isArgoCDSecret(referencedSecrets, secret) {
|
||||
pruneObjects[kube.ResourceKey{Group: "", Kind: "Secret", Name: secret.GetName()}] = secret
|
||||
pruneObjects[kube.ResourceKey{Group: "", Kind: "Secret", Name: secret.GetName(), Namespace: secret.GetNamespace()}] = secret
|
||||
}
|
||||
}
|
||||
applications, err := acdClients.applications.List(ctx, v1.ListOptions{})
|
||||
errors.CheckError(err)
|
||||
for _, app := range applications.Items {
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.ApplicationKind, Name: app.GetName()}] = app
|
||||
if secutil.IsNamespaceEnabled(app.GetNamespace(), namespace, applicationNamespaces) {
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.ApplicationKind, Name: app.GetName(), Namespace: app.GetNamespace()}] = app
|
||||
}
|
||||
}
|
||||
projects, err := acdClients.projects.List(ctx, v1.ListOptions{})
|
||||
errors.CheckError(err)
|
||||
for _, proj := range projects.Items {
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.AppProjectKind, Name: proj.GetName()}] = proj
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.AppProjectKind, Name: proj.GetName(), Namespace: proj.GetNamespace()}] = proj
|
||||
}
|
||||
applicationSets, err := acdClients.applicationSets.List(ctx, v1.ListOptions{})
|
||||
if apierr.IsForbidden(err) || apierr.IsNotFound(err) {
|
||||
@@ -191,7 +226,9 @@ func NewImportCommand() *cobra.Command {
|
||||
}
|
||||
if applicationSets != nil {
|
||||
for _, appSet := range applicationSets.Items {
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.ApplicationSetKind, Name: appSet.GetName()}] = appSet
|
||||
if secutil.IsNamespaceEnabled(appSet.GetNamespace(), namespace, applicationsetNamespaces) {
|
||||
pruneObjects[kube.ResourceKey{Group: application.Group, Kind: application.ApplicationSetKind, Name: appSet.GetName(), Namespace: appSet.GetNamespace()}] = appSet
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -200,22 +237,41 @@ func NewImportCommand() *cobra.Command {
|
||||
errors.CheckError(err)
|
||||
for _, bakObj := range backupObjects {
|
||||
gvk := bakObj.GroupVersionKind()
|
||||
key := kube.ResourceKey{Group: gvk.Group, Kind: gvk.Kind, Name: bakObj.GetName()}
|
||||
// For objects without namespace, assume they belong in ArgoCD namespace
|
||||
if bakObj.GetNamespace() == "" {
|
||||
bakObj.SetNamespace(namespace)
|
||||
}
|
||||
key := kube.ResourceKey{Group: gvk.Group, Kind: gvk.Kind, Name: bakObj.GetName(), Namespace: bakObj.GetNamespace()}
|
||||
liveObj, exists := pruneObjects[key]
|
||||
delete(pruneObjects, key)
|
||||
var dynClient dynamic.ResourceInterface
|
||||
switch bakObj.GetKind() {
|
||||
case "Secret":
|
||||
dynClient = acdClients.secrets
|
||||
dynClient = client.Resource(secretResource).Namespace(bakObj.GetNamespace())
|
||||
case "ConfigMap":
|
||||
dynClient = acdClients.configMaps
|
||||
dynClient = client.Resource(configMapResource).Namespace(bakObj.GetNamespace())
|
||||
case application.AppProjectKind:
|
||||
dynClient = acdClients.projects
|
||||
dynClient = client.Resource(appprojectsResource).Namespace(bakObj.GetNamespace())
|
||||
case application.ApplicationKind:
|
||||
dynClient = acdClients.applications
|
||||
dynClient = client.Resource(applicationsResource).Namespace(bakObj.GetNamespace())
|
||||
// If application is not in one of the allowed namespaces do not import it
|
||||
if !secutil.IsNamespaceEnabled(bakObj.GetNamespace(), namespace, applicationNamespaces) {
|
||||
continue
|
||||
}
|
||||
case application.ApplicationSetKind:
|
||||
dynClient = acdClients.applicationSets
|
||||
dynClient = client.Resource(appplicationSetResource).Namespace(bakObj.GetNamespace())
|
||||
// If applicationset is not in one of the allowed namespaces do not import it
|
||||
if !secutil.IsNamespaceEnabled(bakObj.GetNamespace(), namespace, applicationsetNamespaces) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// If there is a live object, remove the tracking annotations/label that might conflict
|
||||
// when argo is managed with an application.
|
||||
if ignoreTracking && exists {
|
||||
updateTracking(bakObj, &liveObj)
|
||||
}
|
||||
|
||||
if !exists {
|
||||
isForbidden := false
|
||||
if !dryRun {
|
||||
@@ -228,7 +284,7 @@ func NewImportCommand() *cobra.Command {
|
||||
}
|
||||
}
|
||||
if !isForbidden {
|
||||
fmt.Printf("%s/%s %s created%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg)
|
||||
fmt.Printf("%s/%s %s in namespace %s created%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), bakObj.GetNamespace(), dryRunMsg)
|
||||
}
|
||||
} else if specsEqual(*bakObj, liveObj) && checkAppHasNoNeedToStopOperation(liveObj, stopOperation) {
|
||||
if verbose {
|
||||
@@ -247,7 +303,7 @@ func NewImportCommand() *cobra.Command {
|
||||
}
|
||||
}
|
||||
if !isForbidden {
|
||||
fmt.Printf("%s/%s %s updated%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), dryRunMsg)
|
||||
fmt.Printf("%s/%s %s in namespace %s updated%s\n", gvk.Group, gvk.Kind, bakObj.GetName(), bakObj.GetNamespace(), dryRunMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -258,11 +314,11 @@ func NewImportCommand() *cobra.Command {
|
||||
var dynClient dynamic.ResourceInterface
|
||||
switch key.Kind {
|
||||
case "Secret":
|
||||
dynClient = acdClients.secrets
|
||||
dynClient = client.Resource(secretResource).Namespace(liveObj.GetNamespace())
|
||||
case application.AppProjectKind:
|
||||
dynClient = acdClients.projects
|
||||
dynClient = client.Resource(appprojectsResource).Namespace(liveObj.GetNamespace())
|
||||
case application.ApplicationKind:
|
||||
dynClient = acdClients.applications
|
||||
dynClient = client.Resource(applicationsResource).Namespace(liveObj.GetNamespace())
|
||||
if !dryRun {
|
||||
if finalizers := liveObj.GetFinalizers(); len(finalizers) > 0 {
|
||||
newLive := liveObj.DeepCopy()
|
||||
@@ -274,7 +330,7 @@ func NewImportCommand() *cobra.Command {
|
||||
}
|
||||
}
|
||||
case application.ApplicationSetKind:
|
||||
dynClient = acdClients.applicationSets
|
||||
dynClient = client.Resource(appplicationSetResource).Namespace(liveObj.GetNamespace())
|
||||
default:
|
||||
log.Fatalf("Unexpected kind '%s' in prune list", key.Kind)
|
||||
}
|
||||
@@ -301,8 +357,11 @@ func NewImportCommand() *cobra.Command {
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(&command)
|
||||
command.Flags().BoolVar(&dryRun, "dry-run", false, "Print what will be performed")
|
||||
command.Flags().BoolVar(&prune, "prune", false, "Prune secrets, applications and projects which do not appear in the backup")
|
||||
command.Flags().BoolVar(&ignoreTracking, "ignore-tracking", false, "Do not update the tracking annotation if the resource is already tracked")
|
||||
command.Flags().BoolVar(&verbose, "verbose", false, "Verbose output (versus only changed output)")
|
||||
command.Flags().BoolVar(&stopOperation, "stop-operation", false, "Stop any existing operations")
|
||||
command.Flags().StringSliceVarP(&applicationNamespaces, "application-namespaces", "", []string{}, fmt.Sprintf("Comma separated list of namespace globs to which import of applications is allowed. If not provided value from '%s' in %s will be used,if it's not defined only applications without an explicit namespace will be imported to the Argo CD namespace", applicationNamespacesCmdParamsKey, common.ArgoCDCmdParamsConfigMapName))
|
||||
command.Flags().StringSliceVarP(&applicationsetNamespaces, "applicationset-namespaces", "", []string{}, fmt.Sprintf("Comma separated list of namespace globs which import of applicationsets is allowed. If not provided value from '%s' in %s will be used,if it's not defined only applicationsets without an explicit namespace will be imported to the Argo CD namespace", applicationsetNamespacesCmdParamsKey, common.ArgoCDCmdParamsConfigMapName))
|
||||
|
||||
return &command
|
||||
}
|
||||
@@ -320,13 +379,14 @@ func checkAppHasNoNeedToStopOperation(liveObj unstructured.Unstructured, stopOpe
|
||||
}
|
||||
|
||||
// export writes the unstructured object and removes extraneous cruft from output before writing
|
||||
func export(w io.Writer, un unstructured.Unstructured) {
|
||||
func export(w io.Writer, un unstructured.Unstructured, argocdNamespace string) {
|
||||
name := un.GetName()
|
||||
finalizers := un.GetFinalizers()
|
||||
apiVersion := un.GetAPIVersion()
|
||||
kind := un.GetKind()
|
||||
labels := un.GetLabels()
|
||||
annotations := un.GetAnnotations()
|
||||
namespace := un.GetNamespace()
|
||||
unstructured.RemoveNestedField(un.Object, "metadata")
|
||||
un.SetName(name)
|
||||
un.SetFinalizers(finalizers)
|
||||
@@ -334,6 +394,9 @@ func export(w io.Writer, un unstructured.Unstructured) {
|
||||
un.SetKind(kind)
|
||||
un.SetLabels(labels)
|
||||
un.SetAnnotations(annotations)
|
||||
if namespace != argocdNamespace {
|
||||
un.SetNamespace(namespace)
|
||||
}
|
||||
data, err := yaml.Marshal(un.Object)
|
||||
errors.CheckError(err)
|
||||
_, err = w.Write(data)
|
||||
@@ -368,3 +431,32 @@ func updateLive(bak, live *unstructured.Unstructured, stopOperation bool) *unstr
|
||||
}
|
||||
return newLive
|
||||
}
|
||||
|
||||
// updateTracking will update the tracking label and annotation in the bak resources to the
|
||||
// value of the live resource.
|
||||
func updateTracking(bak, live *unstructured.Unstructured) {
|
||||
// update the common annotation
|
||||
bakAnnotations := bak.GetAnnotations()
|
||||
liveAnnotations := live.GetAnnotations()
|
||||
if liveAnnotations != nil && bakAnnotations != nil {
|
||||
if v, ok := liveAnnotations[common.AnnotationKeyAppInstance]; ok {
|
||||
if _, ok := bakAnnotations[common.AnnotationKeyAppInstance]; ok {
|
||||
bakAnnotations[common.AnnotationKeyAppInstance] = v
|
||||
bak.SetAnnotations(bakAnnotations)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update the common label
|
||||
// A custom label can be set, but it is impossible to know which instance is managing the application
|
||||
bakLabels := bak.GetLabels()
|
||||
liveLabels := live.GetLabels()
|
||||
if liveLabels != nil && bakLabels != nil {
|
||||
if v, ok := liveLabels[common.LabelKeyAppInstance]; ok {
|
||||
if _, ok := bakLabels[common.LabelKeyAppInstance]; ok {
|
||||
bakLabels[common.LabelKeyAppInstance] = v
|
||||
bak.SetLabels(bakLabels)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
87
cmd/argocd/commands/admin/backup_test.go
Normal file
87
cmd/argocd/commands/admin/backup_test.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
)
|
||||
|
||||
func newBackupObject(trackingValue string, trackingLabel bool, trackingAnnotation bool) *unstructured.Unstructured {
|
||||
cm := v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-configmap",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
if trackingLabel {
|
||||
cm.SetLabels(map[string]string{
|
||||
common.LabelKeyAppInstance: trackingValue,
|
||||
})
|
||||
}
|
||||
if trackingAnnotation {
|
||||
cm.SetAnnotations(map[string]string{
|
||||
common.AnnotationKeyAppInstance: trackingValue,
|
||||
})
|
||||
}
|
||||
return kube.MustToUnstructured(&cm)
|
||||
}
|
||||
|
||||
func Test_updateTracking(t *testing.T) {
|
||||
type args struct {
|
||||
bak *unstructured.Unstructured
|
||||
live *unstructured.Unstructured
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
expected *unstructured.Unstructured
|
||||
}{
|
||||
{
|
||||
name: "update annotation when present in live",
|
||||
args: args{
|
||||
bak: newBackupObject("bak", false, true),
|
||||
live: newBackupObject("live", false, true),
|
||||
},
|
||||
expected: newBackupObject("live", false, true),
|
||||
},
|
||||
{
|
||||
name: "update default label when present in live",
|
||||
args: args{
|
||||
bak: newBackupObject("bak", true, true),
|
||||
live: newBackupObject("live", true, true),
|
||||
},
|
||||
expected: newBackupObject("live", true, true),
|
||||
},
|
||||
{
|
||||
name: "do not update if live object does not have tracking",
|
||||
args: args{
|
||||
bak: newBackupObject("bak", true, true),
|
||||
live: newBackupObject("live", false, false),
|
||||
},
|
||||
expected: newBackupObject("bak", true, true),
|
||||
},
|
||||
{
|
||||
name: "do not update if bak object does not have tracking",
|
||||
args: args{
|
||||
bak: newBackupObject("bak", false, false),
|
||||
live: newBackupObject("live", true, true),
|
||||
},
|
||||
expected: newBackupObject("bak", false, false),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
updateTracking(tt.args.bak, tt.args.live)
|
||||
assert.Equal(t, tt.expected, tt.args.bak)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -183,13 +183,12 @@ func getControllerReplicas(ctx context.Context, kubeClient *kubernetes.Clientset
|
||||
|
||||
func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
shard int
|
||||
replicas int
|
||||
shardingAlgorithm string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
cacheSrc func() (*appstatecache.Cache, error)
|
||||
portForwardRedis bool
|
||||
redisCompressionStr string
|
||||
shard int
|
||||
replicas int
|
||||
shardingAlgorithm string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
cacheSrc func() (*appstatecache.Cache, error)
|
||||
portForwardRedis bool
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: "shards",
|
||||
@@ -213,7 +212,7 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
|
||||
if replicas == 0 {
|
||||
return
|
||||
}
|
||||
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
|
||||
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, clientOpts.RedisCompression)
|
||||
errors.CheckError(err)
|
||||
if len(clusters) == 0 {
|
||||
return
|
||||
@@ -234,7 +233,6 @@ func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
|
||||
// we can ignore unchecked error here as the command will be parsed again and checked when command.Execute() is run later
|
||||
// nolint:errcheck
|
||||
command.ParseFlags(os.Args[1:])
|
||||
redisCompressionStr, _ = command.Flags().GetString(cacheutil.CLIFlagRedisCompress)
|
||||
return &command
|
||||
}
|
||||
|
||||
@@ -466,13 +464,12 @@ func NewClusterDisableNamespacedMode() *cobra.Command {
|
||||
|
||||
func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
shard int
|
||||
replicas int
|
||||
shardingAlgorithm string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
cacheSrc func() (*appstatecache.Cache, error)
|
||||
portForwardRedis bool
|
||||
redisCompressionStr string
|
||||
shard int
|
||||
replicas int
|
||||
shardingAlgorithm string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
cacheSrc func() (*appstatecache.Cache, error)
|
||||
portForwardRedis bool
|
||||
)
|
||||
command := cobra.Command{
|
||||
Use: "stats",
|
||||
@@ -502,7 +499,7 @@ argocd admin cluster stats target-cluster`,
|
||||
replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, redisCompressionStr)
|
||||
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, shardingAlgorithm, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName, clientOpts.RedisCompression)
|
||||
errors.CheckError(err)
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
@@ -524,7 +521,6 @@ argocd admin cluster stats target-cluster`,
|
||||
// we can ignore unchecked error here as the command will be parsed again and checked when command.Execute() is run later
|
||||
// nolint:errcheck
|
||||
command.ParseFlags(os.Args[1:])
|
||||
redisCompressionStr, _ = command.Flags().GetString(cacheutil.CLIFlagRedisCompress)
|
||||
return &command
|
||||
}
|
||||
|
||||
|
||||
@@ -12,17 +12,14 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/cache"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
)
|
||||
|
||||
func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
port int
|
||||
address string
|
||||
compressionStr string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
port int
|
||||
address string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "dashboard",
|
||||
@@ -30,10 +27,8 @@ func NewDashboardCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
ctx := cmd.Context()
|
||||
|
||||
compression, err := cache.CompressionTypeFromString(compressionStr)
|
||||
errors.CheckError(err)
|
||||
clientOpts.Core = true
|
||||
errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, compression, clientConfig))
|
||||
errors.CheckError(headless.MaybeStartLocalServer(ctx, clientOpts, initialize.RetrieveContextIfChanged(cmd.Flag("context")), &port, &address, clientConfig))
|
||||
println(fmt.Sprintf("Argo CD UI is available at http://%s:%d", address, port))
|
||||
<-ctx.Done()
|
||||
},
|
||||
@@ -50,6 +45,5 @@ $ argocd admin dashboard --redis-compress gzip
|
||||
clientConfig = cli.AddKubectlFlagsToSet(cmd.Flags())
|
||||
cmd.Flags().IntVar(&port, "port", common.DefaultPortAPIServer, "Listen on given port")
|
||||
cmd.Flags().StringVar(&address, "address", common.DefaultAddressAdminDashboard, "Listen on given address")
|
||||
cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -35,7 +35,8 @@ func NewNotificationsCommand() *cobra.Command {
|
||||
"notifications",
|
||||
"argocd admin notifications",
|
||||
applications,
|
||||
settings.GetFactorySettings(argocdService, "argocd-notifications-secret", "argocd-notifications-cm", false), func(clientConfig clientcmd.ClientConfig) {
|
||||
settings.GetFactorySettingsForCLI(&argocdService, "argocd-notifications-secret", "argocd-notifications-cm", false),
|
||||
func(clientConfig clientcmd.ClientConfig) {
|
||||
k8sCfg, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse k8s config: %v", err)
|
||||
|
||||
@@ -159,7 +159,7 @@ func NewSettingsCommand() *cobra.Command {
|
||||
|
||||
command.AddCommand(NewValidateSettingsCommand(&opts))
|
||||
command.AddCommand(NewResourceOverridesCommand(&opts))
|
||||
command.AddCommand(NewRBACCommand())
|
||||
command.AddCommand(NewRBACCommand(&opts))
|
||||
|
||||
opts.clientConfig = cli.AddKubectlFlagsToCmd(command)
|
||||
command.PersistentFlags().StringVar(&opts.argocdCMPath, "argocd-cm-path", "", "Path to local argocd-cm.yaml file")
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/server/rbacpolicy"
|
||||
"github.com/argoproj/argo-cd/v2/util/assets"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/rbac"
|
||||
)
|
||||
|
||||
@@ -28,7 +29,7 @@ type rbacTrait struct {
|
||||
}
|
||||
|
||||
// Provide a mapping of short-hand resource names to their RBAC counterparts
|
||||
var resourceMap map[string]string = map[string]string{
|
||||
var resourceMap = map[string]string{
|
||||
"account": rbacpolicy.ResourceAccounts,
|
||||
"app": rbacpolicy.ResourceApplications,
|
||||
"apps": rbacpolicy.ResourceApplications,
|
||||
@@ -52,8 +53,17 @@ var resourceMap map[string]string = map[string]string{
|
||||
"repository": rbacpolicy.ResourceRepositories,
|
||||
}
|
||||
|
||||
var projectScoped = map[string]bool{
|
||||
rbacpolicy.ResourceApplications: true,
|
||||
rbacpolicy.ResourceApplicationSets: true,
|
||||
rbacpolicy.ResourceLogs: true,
|
||||
rbacpolicy.ResourceExec: true,
|
||||
rbacpolicy.ResourceClusters: true,
|
||||
rbacpolicy.ResourceRepositories: true,
|
||||
}
|
||||
|
||||
// List of allowed RBAC resources
|
||||
var validRBACResourcesActions map[string]actionTraitMap = map[string]actionTraitMap{
|
||||
var validRBACResourcesActions = map[string]actionTraitMap{
|
||||
rbacpolicy.ResourceAccounts: accountsActions,
|
||||
rbacpolicy.ResourceApplications: applicationsActions,
|
||||
rbacpolicy.ResourceApplicationSets: defaultCRUDActions,
|
||||
@@ -109,7 +119,7 @@ var extensionActions = actionTraitMap{
|
||||
}
|
||||
|
||||
// NewRBACCommand is the command for 'rbac'
|
||||
func NewRBACCommand() *cobra.Command {
|
||||
func NewRBACCommand(cmdCtx commandContext) *cobra.Command {
|
||||
command := &cobra.Command{
|
||||
Use: "rbac",
|
||||
Short: "Validate and test RBAC configuration",
|
||||
@@ -117,13 +127,13 @@ func NewRBACCommand() *cobra.Command {
|
||||
c.HelpFunc()(c, args)
|
||||
},
|
||||
}
|
||||
command.AddCommand(NewRBACCanCommand())
|
||||
command.AddCommand(NewRBACCanCommand(cmdCtx))
|
||||
command.AddCommand(NewRBACValidateCommand())
|
||||
return command
|
||||
}
|
||||
|
||||
// NewRBACCanCommand is the command for 'rbac can-role'
|
||||
func NewRBACCanCommand() *cobra.Command {
|
||||
// NewRBACCanCommand is the command for 'rbac can'
|
||||
func NewRBACCanCommand(cmdCtx commandContext) *cobra.Command {
|
||||
var (
|
||||
policyFile string
|
||||
defaultRole string
|
||||
@@ -175,11 +185,6 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
|
||||
subResource = args[3]
|
||||
}
|
||||
|
||||
userPolicy := ""
|
||||
builtinPolicy := ""
|
||||
|
||||
var newDefaultRole string
|
||||
|
||||
namespace, nsOverride, err := clientConfig.Namespace()
|
||||
if err != nil {
|
||||
log.Fatalf("could not create k8s client: %v", err)
|
||||
@@ -203,6 +208,7 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
|
||||
userPolicy, newDefaultRole, matchMode := getPolicy(ctx, policyFile, realClientset, namespace)
|
||||
|
||||
// Use built-in policy as augmentation if requested
|
||||
builtinPolicy := ""
|
||||
if useBuiltin {
|
||||
builtinPolicy = assets.BuiltinPolicyCSV
|
||||
}
|
||||
@@ -213,7 +219,30 @@ argocd admin settings rbac can someuser create application 'default/app' --defau
|
||||
defaultRole = newDefaultRole
|
||||
}
|
||||
|
||||
res := checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole, matchMode, strict)
|
||||
// Logs RBAC will be enforced only if an internal var serverRBACLogEnforceEnable
|
||||
// (representing server.rbac.log.enforce.enable env var in argocd-cm)
|
||||
// is defined and has a "true" value
|
||||
// Otherwise, no RBAC enforcement for logs will take place (meaning, 'can' request on a logs resource will result in "yes",
|
||||
// even if there is no explicit RBAC allow, or if there is an explicit RBAC deny)
|
||||
var isLogRbacEnforced func() bool
|
||||
if nsOverride && policyFile == "" {
|
||||
if resolveRBACResourceName(resource) == rbacpolicy.ResourceLogs {
|
||||
isLogRbacEnforced = func() bool {
|
||||
if opts, ok := cmdCtx.(*settingsOpts); ok {
|
||||
opts.loadClusterSettings = true
|
||||
opts.clientConfig = clientConfig
|
||||
settingsMgr, err := opts.createSettingsManager(ctx)
|
||||
errors.CheckError(err)
|
||||
logEnforceEnable, err := settingsMgr.GetServerRBACLogEnforceEnable()
|
||||
errors.CheckError(err)
|
||||
return logEnforceEnable
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
res := checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole, matchMode, strict, isLogRbacEnforced)
|
||||
|
||||
if res {
|
||||
if !quiet {
|
||||
fmt.Println("Yes")
|
||||
@@ -359,20 +388,16 @@ func getPolicyFromFile(policyFile string) (string, string, string, error) {
|
||||
// Retrieve policy information from a ConfigMap
|
||||
func getPolicyFromConfigMap(cm *corev1.ConfigMap) (string, string, string) {
|
||||
var (
|
||||
userPolicy string
|
||||
defaultRole string
|
||||
ok bool
|
||||
)
|
||||
userPolicy, ok = cm.Data[rbac.ConfigMapPolicyCSVKey]
|
||||
if !ok {
|
||||
userPolicy = ""
|
||||
}
|
||||
|
||||
defaultRole, ok = cm.Data[rbac.ConfigMapPolicyDefaultKey]
|
||||
if !ok {
|
||||
defaultRole = ""
|
||||
}
|
||||
|
||||
return userPolicy, defaultRole, cm.Data[rbac.ConfigMapMatchModeKey]
|
||||
return rbac.PolicyCSV(cm.Data), defaultRole, cm.Data[rbac.ConfigMapMatchModeKey]
|
||||
}
|
||||
|
||||
// getPolicyConfigMap fetches the RBAC config map from K8s cluster
|
||||
@@ -386,7 +411,7 @@ func getPolicyConfigMap(ctx context.Context, client kubernetes.Interface, namesp
|
||||
|
||||
// checkPolicy checks whether given subject is allowed to execute specified
|
||||
// action against specified resource
|
||||
func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole, matchMode string, strict bool) bool {
|
||||
func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPolicy, defaultRole, matchMode string, strict bool, isLogRbacEnforced func() bool) bool {
|
||||
enf := rbac.NewEnforcer(nil, "argocd", "argocd-rbac-cm", nil)
|
||||
enf.SetDefaultRole(defaultRole)
|
||||
enf.SetMatchMode(matchMode)
|
||||
@@ -420,15 +445,19 @@ func checkPolicy(subject, action, resource, subResource, builtinPolicy, userPoli
|
||||
}
|
||||
}
|
||||
|
||||
// Application resources have a special notation - for simplicity's sake,
|
||||
// Some project scoped resources have a special notation - for simplicity's sake,
|
||||
// if user gives no sub-resource (or specifies simple '*'), we construct
|
||||
// the required notation by setting subresource to '*/*'.
|
||||
if realResource == rbacpolicy.ResourceApplications {
|
||||
if projectScoped[realResource] {
|
||||
if subResource == "*" || subResource == "" {
|
||||
subResource = "*/*"
|
||||
}
|
||||
}
|
||||
|
||||
if realResource == rbacpolicy.ResourceLogs {
|
||||
if isLogRbacEnforced != nil && !isLogRbacEnforced() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return enf.Enforce(subject, realResource, action, subResource)
|
||||
}
|
||||
|
||||
|
||||
@@ -130,6 +130,16 @@ func Test_PolicyFromYAML(t *testing.T) {
|
||||
require.NotEmpty(t, uPol)
|
||||
require.Equal(t, "role:unknown", dRole)
|
||||
require.Empty(t, matchMode)
|
||||
require.True(t, checkPolicy("my-org:team-qa", "update", "project", "foo",
|
||||
"", uPol, dRole, matchMode, true, nil))
|
||||
}
|
||||
|
||||
func trueLogRbacEnforce() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func falseLogRbacEnforce() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func Test_PolicyFromK8s(t *testing.T) {
|
||||
@@ -153,43 +163,105 @@ func Test_PolicyFromK8s(t *testing.T) {
|
||||
require.Equal(t, "", matchMode)
|
||||
|
||||
t.Run("get applications", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "applications", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true)
|
||||
ok := checkPolicy("role:user", "get", "applications", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("get clusters", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "clusters", "*", assets.BuiltinPolicyCSV, uPol, dRole, "", true)
|
||||
ok := checkPolicy("role:user", "get", "clusters", "*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("get certificates", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, dRole, "", true)
|
||||
ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.False(t, ok)
|
||||
})
|
||||
t.Run("get certificates by default role", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, "role:readonly", "glob", true)
|
||||
ok := checkPolicy("role:user", "get", "certificates", "*", assets.BuiltinPolicyCSV, uPol, "role:readonly", "glob", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("get certificates by default role without builtin policy", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "certificates", "*", "", uPol, "role:readonly", "glob", true)
|
||||
ok := checkPolicy("role:user", "get", "certificates", "*", "", uPol, "role:readonly", "glob", true, nil)
|
||||
require.False(t, ok)
|
||||
})
|
||||
t.Run("use regex match mode instead of glob", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".*", assets.BuiltinPolicyCSV, uPol, "role:readonly", "regex", true)
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".*", assets.BuiltinPolicyCSV, uPol, "role:readonly", "regex", true, nil)
|
||||
require.False(t, ok)
|
||||
})
|
||||
t.Run("get logs", func(t *testing.T) {
|
||||
ok := checkPolicy("role:test", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true)
|
||||
ok := checkPolicy("role:test", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
// no function is provided to check if logs rbac is enforced or not, so the policy permissions are queried to determine if no-such-user can get logs
|
||||
t.Run("no-such-user get logs", func(t *testing.T) {
|
||||
ok := checkPolicy("no-such-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.False(t, ok)
|
||||
})
|
||||
// logs rbac policy is enforced, and no-such-user is not granted logs permission in user policy, so the result should be false (cannot get logs)
|
||||
t.Run("no-such-user get logs rbac enforced", func(t *testing.T) {
|
||||
ok := checkPolicy("no-such-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, trueLogRbacEnforce)
|
||||
require.False(t, ok)
|
||||
})
|
||||
// no-such-user is not granted logs permission in user policy, but logs rbac policy is not enforced, so logs permission is open to all
|
||||
t.Run("no-such-user get logs rbac not enforced", func(t *testing.T) {
|
||||
ok := checkPolicy("no-such-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, falseLogRbacEnforce)
|
||||
require.True(t, ok)
|
||||
})
|
||||
// no function is provided to check if logs rbac is enforced or not, so the policy permissions are queried to determine if log-deny-user can get logs
|
||||
t.Run("log-deny-user get logs", func(t *testing.T) {
|
||||
ok := checkPolicy("log-deny-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.False(t, ok)
|
||||
})
|
||||
// logs rbac policy is enforced, and log-deny-user is denied logs permission in user policy, so the result should be false (cannot get logs)
|
||||
t.Run("log-deny-user get logs rbac enforced", func(t *testing.T) {
|
||||
ok := checkPolicy("log-deny-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, trueLogRbacEnforce)
|
||||
require.False(t, ok)
|
||||
})
|
||||
// log-deny-user is denied logs permission in user policy, but logs rbac policy is not enforced, so logs permission is open to all
|
||||
t.Run("log-deny-user get logs rbac not enforced", func(t *testing.T) {
|
||||
ok := checkPolicy("log-deny-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, falseLogRbacEnforce)
|
||||
require.True(t, ok)
|
||||
})
|
||||
// no function is provided to check if logs rbac is enforced or not, so the policy permissions are queried to determine if log-allow-user can get logs
|
||||
t.Run("log-allow-user get logs", func(t *testing.T) {
|
||||
ok := checkPolicy("log-allow-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
// logs rbac policy is enforced, and log-allow-user is granted logs permission in user policy, so the result should be true (can get logs)
|
||||
t.Run("log-allow-user get logs rbac enforced", func(t *testing.T) {
|
||||
ok := checkPolicy("log-allow-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, trueLogRbacEnforce)
|
||||
require.True(t, ok)
|
||||
})
|
||||
// log-allow-user is granted logs permission in user policy, and logs rbac policy is not enforced, so logs permission is open to all
|
||||
t.Run("log-allow-user get logs rbac not enforced", func(t *testing.T) {
|
||||
ok := checkPolicy("log-allow-user", "get", "logs", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, falseLogRbacEnforce)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("get logs", func(t *testing.T) {
|
||||
ok := checkPolicy("role:test", "get", "logs", "*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("get logs", func(t *testing.T) {
|
||||
ok := checkPolicy("role:test", "get", "logs", "", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("create exec", func(t *testing.T) {
|
||||
ok := checkPolicy("role:test", "create", "exec", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true)
|
||||
ok := checkPolicy("role:test", "create", "exec", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("create applicationsets", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "create", "applicationsets", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true)
|
||||
ok := checkPolicy("role:user", "create", "applicationsets", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
// trueLogRbacEnforce or falseLogRbacEnforce should not affect non-logs resources
|
||||
t.Run("create applicationsets with trueLogRbacEnforce", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "create", "applicationsets", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, trueLogRbacEnforce)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("create applicationsets with falseLogRbacEnforce", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "create", "applicationsets", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, trueLogRbacEnforce)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("delete applicationsets", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "delete", "applicationsets", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true)
|
||||
ok := checkPolicy("role:user", "delete", "applicationsets", "*/*", assets.BuiltinPolicyCSV, uPol, dRole, "", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
}
|
||||
@@ -229,49 +301,49 @@ p, role:readonly, certificates, get, .*, allow
|
||||
p, role:, certificates, get, .*, allow`
|
||||
|
||||
t.Run("get applications", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "applications", ".*/.*", builtInPolicy, uPol, dRole, "regex", true)
|
||||
ok := checkPolicy("role:user", "get", "applications", ".*/.*", builtInPolicy, uPol, dRole, "regex", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("get clusters", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "clusters", ".*", builtInPolicy, uPol, dRole, "regex", true)
|
||||
ok := checkPolicy("role:user", "get", "clusters", ".*", builtInPolicy, uPol, dRole, "regex", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("get certificates", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".*", builtInPolicy, uPol, dRole, "regex", true)
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".*", builtInPolicy, uPol, dRole, "regex", true, nil)
|
||||
require.False(t, ok)
|
||||
})
|
||||
t.Run("get certificates by default role", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".*", builtInPolicy, uPol, "role:readonly", "regex", true)
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".*", builtInPolicy, uPol, "role:readonly", "regex", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("get certificates by default role without builtin policy", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".*", "", uPol, "role:readonly", "regex", true)
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".*", "", uPol, "role:readonly", "regex", true, nil)
|
||||
require.False(t, ok)
|
||||
})
|
||||
t.Run("use glob match mode instead of regex", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".+", builtInPolicy, uPol, dRole, "glob", true)
|
||||
ok := checkPolicy("role:user", "get", "certificates", ".+", builtInPolicy, uPol, dRole, "glob", true, nil)
|
||||
require.False(t, ok)
|
||||
})
|
||||
t.Run("get logs via glob match mode", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "get", "logs", ".*/.*", builtInPolicy, uPol, dRole, "glob", true)
|
||||
ok := checkPolicy("role:user", "get", "logs", ".*/.*", builtInPolicy, uPol, dRole, "glob", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("create exec", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "create", "exec", ".*/.*", builtInPolicy, uPol, dRole, "regex", true)
|
||||
ok := checkPolicy("role:user", "create", "exec", ".*/.*", builtInPolicy, uPol, dRole, "regex", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("create applicationsets", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "create", "applicationsets", ".*/.*", builtInPolicy, uPol, dRole, "regex", true)
|
||||
ok := checkPolicy("role:user", "create", "applicationsets", ".*/.*", builtInPolicy, uPol, dRole, "regex", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
t.Run("delete applicationsets", func(t *testing.T) {
|
||||
ok := checkPolicy("role:user", "delete", "applicationsets", ".*/.*", builtInPolicy, uPol, dRole, "regex", true)
|
||||
ok := checkPolicy("role:user", "delete", "applicationsets", ".*/.*", builtInPolicy, uPol, dRole, "regex", true, nil)
|
||||
require.True(t, ok)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewRBACCanCommand(t *testing.T) {
|
||||
command := NewRBACCanCommand()
|
||||
command := NewRBACCanCommand(&settingsOpts{})
|
||||
|
||||
require.NotNil(t, command)
|
||||
assert.Equal(t, "can", command.Name())
|
||||
|
||||
@@ -12,6 +12,10 @@ data:
|
||||
p, role:user, applicationsets, delete, */*, allow
|
||||
p, role:user, logs, get, */*, allow
|
||||
g, test, role:user
|
||||
policy.overlay.csv: |
|
||||
p, role:tester, applications, *, */*, allow
|
||||
p, role:tester, projects, *, *, allow
|
||||
g, my-org:team-qa, role:tester
|
||||
policy.default: role:unknown
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
|
||||
@@ -10,4 +10,6 @@ p, role:user, applicationsets, delete, */*, allow
|
||||
p, role:test, certificates, get, *, allow
|
||||
p, role:test, logs, get, */*, allow
|
||||
p, role:test, exec, create, */*, allow
|
||||
p, log-allow-user, logs, get, */*, allow
|
||||
p, log-deny-user, logs, get, */*, deny
|
||||
g, test, role:user
|
||||
|
||||
|
@@ -294,7 +294,7 @@ func parentChildDetails(appIf application.ApplicationServiceClient, ctx context.
|
||||
return mapUidToNode, mapParentToChild, parentNode
|
||||
}
|
||||
|
||||
func printHeader(acdClient argocdclient.Client, app *argoappv1.Application, ctx context.Context, windows *argoappv1.SyncWindows, showOperation bool, showParams bool) {
|
||||
func printHeader(acdClient argocdclient.Client, app *argoappv1.Application, ctx context.Context, windows *argoappv1.SyncWindows, showOperation bool, showParams bool, sourcePosition int) {
|
||||
aURL := appURL(ctx, acdClient, app.Name)
|
||||
printAppSummaryTable(app, aURL, windows)
|
||||
|
||||
@@ -309,20 +309,21 @@ func printHeader(acdClient argocdclient.Client, app *argoappv1.Application, ctx
|
||||
fmt.Println()
|
||||
printOperationResult(app.Status.OperationState)
|
||||
}
|
||||
if !app.Spec.HasMultipleSources() && showParams {
|
||||
printParams(app)
|
||||
if showParams {
|
||||
printParams(app, sourcePosition)
|
||||
}
|
||||
}
|
||||
|
||||
// NewApplicationGetCommand returns a new instance of an `argocd app get` command
|
||||
func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
refresh bool
|
||||
hardRefresh bool
|
||||
output string
|
||||
showParams bool
|
||||
showOperation bool
|
||||
appNamespace string
|
||||
refresh bool
|
||||
hardRefresh bool
|
||||
output string
|
||||
showParams bool
|
||||
showOperation bool
|
||||
appNamespace string
|
||||
sourcePosition int
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "get APPNAME",
|
||||
@@ -343,6 +344,9 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
# Show application parameters and overrides
|
||||
argocd app get my-app --show-params
|
||||
|
||||
# Show application parameters and overrides for a source at position 1 under spec.sources of app my-app
|
||||
argocd app get my-app --show-params --source-position 1
|
||||
|
||||
# Refresh application data when retrieving
|
||||
argocd app get my-app --refresh
|
||||
|
||||
@@ -373,9 +377,18 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
Refresh: getRefreshType(refresh, hardRefresh),
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
|
||||
errors.CheckError(err)
|
||||
|
||||
// check for source position if --show-params is set
|
||||
if app.Spec.HasMultipleSources() && showParams {
|
||||
if sourcePosition <= 0 {
|
||||
errors.CheckError(fmt.Errorf("Source position should be specified and must be greater than 0 for applications with multiple sources"))
|
||||
}
|
||||
if len(app.Spec.GetSources()) < sourcePosition {
|
||||
errors.CheckError(fmt.Errorf("Source position should be less than the number of sources in the application"))
|
||||
}
|
||||
}
|
||||
|
||||
pConn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
|
||||
defer argoio.Close(pConn)
|
||||
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: app.Spec.Project})
|
||||
@@ -388,7 +401,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
err := PrintResource(app, output)
|
||||
errors.CheckError(err)
|
||||
case "wide", "":
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams, sourcePosition)
|
||||
if len(app.Status.Resources) > 0 {
|
||||
fmt.Println()
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
@@ -396,14 +409,14 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
_ = w.Flush()
|
||||
}
|
||||
case "tree":
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams, sourcePosition)
|
||||
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
|
||||
if len(mapUidToNode) > 0 {
|
||||
fmt.Println()
|
||||
printTreeView(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
|
||||
}
|
||||
case "tree=detailed":
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams, sourcePosition)
|
||||
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
|
||||
if len(mapUidToNode) > 0 {
|
||||
fmt.Println()
|
||||
@@ -420,6 +433,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
|
||||
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
|
||||
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Only get application from namespace")
|
||||
command.Flags().IntVar(&sourcePosition, "source-position", -1, "Position of the source from the list of sources of the app. Counting starts at 1.")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -572,8 +586,8 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
|
||||
var status string
|
||||
var allow, deny, inactiveAllows bool
|
||||
if windows.HasWindows() {
|
||||
active := windows.Active()
|
||||
if active.HasWindows() {
|
||||
active, err := windows.Active()
|
||||
if err == nil && active.HasWindows() {
|
||||
for _, w := range *active {
|
||||
if w.Kind == "deny" {
|
||||
deny = true
|
||||
@@ -582,13 +596,14 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
|
||||
}
|
||||
}
|
||||
}
|
||||
if windows.InactiveAllows().HasWindows() {
|
||||
inactiveAllowWindows, err := windows.InactiveAllows()
|
||||
if err == nil && inactiveAllowWindows.HasWindows() {
|
||||
inactiveAllows = true
|
||||
}
|
||||
|
||||
s := windows.CanSync(true)
|
||||
if deny || !deny && !allow && inactiveAllows {
|
||||
if s {
|
||||
s, err := windows.CanSync(true)
|
||||
if err == nil && s {
|
||||
status = "Manual Allowed"
|
||||
} else {
|
||||
status = "Sync Denied"
|
||||
@@ -701,9 +716,22 @@ func truncateString(str string, num int) string {
|
||||
}
|
||||
|
||||
// printParams prints parameters and overrides
|
||||
func printParams(app *argoappv1.Application) {
|
||||
if app.Spec.GetSource().Helm != nil {
|
||||
printHelmParams(app.Spec.GetSource().Helm)
|
||||
func printParams(app *argoappv1.Application, sourcePosition int) {
|
||||
var source *argoappv1.ApplicationSource
|
||||
|
||||
if app.Spec.HasMultipleSources() {
|
||||
// Get the source by the sourcePosition whose params you'd like to print
|
||||
source = app.Spec.GetSourcePtrByPosition(sourcePosition)
|
||||
if source == nil {
|
||||
source = &argoappv1.ApplicationSource{}
|
||||
}
|
||||
} else {
|
||||
src := app.Spec.GetSource()
|
||||
source = &src
|
||||
}
|
||||
|
||||
if source.Helm != nil {
|
||||
printHelmParams(source.Helm)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -793,9 +821,9 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
errors.CheckError(err)
|
||||
},
|
||||
}
|
||||
command.Flags().IntVar(&sourcePosition, "source-position", -1, "Position of the source from the list of sources of the app. Counting starts at 1.")
|
||||
cmdutil.AddAppFlags(command, &appOpts)
|
||||
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Set application parameters in namespace")
|
||||
command.Flags().IntVar(&sourcePosition, "source-position", -1, "Position of the source from the list of sources of the app. Counting starts at 1.")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -1255,7 +1283,7 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
if diffOptions.local != "" {
|
||||
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
} else if diffOptions.revision != "" || (diffOptions.revisions != nil && len(diffOptions.revisions) > 0) {
|
||||
} else if diffOptions.revision != "" || len(diffOptions.revisions) > 0 {
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.res.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
@@ -1348,7 +1376,7 @@ func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[
|
||||
}
|
||||
if local, ok := objs[key]; ok || live != nil {
|
||||
if local != nil && !kube.IsCRD(local) {
|
||||
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()))
|
||||
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()), argoSettings.GetInstallationID())
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
@@ -1906,7 +1934,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
if len(projects) != 0 {
|
||||
errMsg += fmt.Sprintf(" projects %v", projects)
|
||||
}
|
||||
log.Fatalf(errMsg)
|
||||
log.Fatal(errMsg)
|
||||
}
|
||||
|
||||
for _, i := range list.Items {
|
||||
@@ -2849,6 +2877,7 @@ func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
errors.CheckError(err)
|
||||
|
||||
proj := getProject(c, clientOpts, ctx, app.Spec.Project)
|
||||
// nolint:staticcheck
|
||||
unstructureds = getLocalObjects(context.Background(), app, proj.Project, local, localRepoRoot, argoSettings.AppLabelKey, cluster.ServerVersion, cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod)
|
||||
} else if len(revisions) > 0 && len(sourcePositions) > 0 {
|
||||
q := application.ApplicationManifestQuery{
|
||||
|
||||
@@ -36,7 +36,7 @@ func TestPrintTreeViewAppResources(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0)
|
||||
|
||||
printTreeViewAppResourcesNotOrphaned(nodeMapping, mapParentToChild, parentNode, false, false, w)
|
||||
printTreeViewAppResourcesNotOrphaned(nodeMapping, mapParentToChild, parentNode, w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -77,7 +77,7 @@ func TestPrintTreeViewDetailedAppResources(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0)
|
||||
|
||||
printDetailedTreeViewAppResourcesNotOrphaned(nodeMapping, mapParentToChild, parentNode, false, false, w)
|
||||
printDetailedTreeViewAppResourcesNotOrphaned(nodeMapping, mapParentToChild, parentNode, w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -175,25 +175,25 @@ func parentChildInfo(nodes []v1alpha1.ResourceNode) (map[string]v1alpha1.Resourc
|
||||
return mapUidToNode, mapParentToChild, parentNode
|
||||
}
|
||||
|
||||
func printDetailedTreeViewAppResourcesNotOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) {
|
||||
func printDetailedTreeViewAppResourcesNotOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, w *tabwriter.Writer) {
|
||||
for uid := range parentNodes {
|
||||
detailedTreeViewAppResourcesNotOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w)
|
||||
}
|
||||
}
|
||||
|
||||
func printDetailedTreeViewAppResourcesOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) {
|
||||
func printDetailedTreeViewAppResourcesOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, w *tabwriter.Writer) {
|
||||
for uid := range parentNodes {
|
||||
detailedTreeViewAppResourcesOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w)
|
||||
}
|
||||
}
|
||||
|
||||
func printTreeViewAppResourcesNotOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) {
|
||||
func printTreeViewAppResourcesNotOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, w *tabwriter.Writer) {
|
||||
for uid := range parentNodes {
|
||||
treeViewAppResourcesNotOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w)
|
||||
}
|
||||
}
|
||||
|
||||
func printTreeViewAppResourcesOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) {
|
||||
func printTreeViewAppResourcesOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, w *tabwriter.Writer) {
|
||||
for uid := range parentNodes {
|
||||
treeViewAppResourcesOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w)
|
||||
}
|
||||
@@ -206,24 +206,24 @@ func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.Appli
|
||||
|
||||
if !orphaned || listAll {
|
||||
mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.Nodes)
|
||||
printDetailedTreeViewAppResourcesNotOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w)
|
||||
printDetailedTreeViewAppResourcesNotOrphaned(mapUidToNode, mapParentToChild, parentNode, w)
|
||||
}
|
||||
|
||||
if orphaned || listAll {
|
||||
mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.OrphanedNodes)
|
||||
printDetailedTreeViewAppResourcesOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w)
|
||||
printDetailedTreeViewAppResourcesOrphaned(mapUidToNode, mapParentToChild, parentNode, w)
|
||||
}
|
||||
} else if output == "tree" {
|
||||
fmt.Fprintf(w, "GROUP\tKIND\tNAMESPACE\tNAME\tORPHANED\n")
|
||||
|
||||
if !orphaned || listAll {
|
||||
mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.Nodes)
|
||||
printTreeViewAppResourcesNotOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w)
|
||||
printTreeViewAppResourcesNotOrphaned(mapUidToNode, mapParentToChild, parentNode, w)
|
||||
}
|
||||
|
||||
if orphaned || listAll {
|
||||
mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.OrphanedNodes)
|
||||
printTreeViewAppResourcesOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w)
|
||||
printTreeViewAppResourcesOrphaned(mapUidToNode, mapParentToChild, parentNode, w)
|
||||
}
|
||||
} else {
|
||||
headers := []interface{}{"GROUP", "KIND", "NAMESPACE", "NAME", "ORPHANED"}
|
||||
|
||||
@@ -918,35 +918,83 @@ func TestPrintAppConditions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPrintParams(t *testing.T) {
|
||||
output, _ := captureOutput(func() error {
|
||||
app := &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "name1",
|
||||
Value: "value1",
|
||||
},
|
||||
{
|
||||
Name: "name2",
|
||||
Value: "value2",
|
||||
},
|
||||
{
|
||||
Name: "name3",
|
||||
Value: "value3",
|
||||
testCases := []struct {
|
||||
name string
|
||||
app *v1alpha1.Application
|
||||
sourcePosition int
|
||||
expectedOutput string
|
||||
}{
|
||||
{
|
||||
name: "Single Source application with valid helm parameters",
|
||||
app: &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "name1",
|
||||
Value: "value1",
|
||||
},
|
||||
{
|
||||
Name: "name2",
|
||||
Value: "value2",
|
||||
},
|
||||
{
|
||||
Name: "name3",
|
||||
Value: "value3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
printParams(app)
|
||||
return nil
|
||||
})
|
||||
expectation := "\n\nNAME VALUE\nname1 value1\nname2 value2\nname3 value3\n"
|
||||
if output != expectation {
|
||||
t.Fatalf("Incorrect print params output %q, should be %q", output, expectation)
|
||||
sourcePosition: -1,
|
||||
expectedOutput: "\n\nNAME VALUE\nname1 value1\nname2 value2\nname3 value3\n",
|
||||
},
|
||||
{
|
||||
name: "Multi-source application with a valid Source Position",
|
||||
app: &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "nameA",
|
||||
Value: "valueA",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
Name: "nameB",
|
||||
Value: "valueB",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
sourcePosition: 1,
|
||||
expectedOutput: "\n\nNAME VALUE\nnameA valueA\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
output, _ := captureOutput(func() error {
|
||||
printParams(tc.app, tc.sourcePosition)
|
||||
return nil
|
||||
})
|
||||
|
||||
if output != tc.expectedOutput {
|
||||
t.Fatalf("Incorrect print params output %q, should be %q\n", output, tc.expectedOutput)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1335,6 +1383,14 @@ func TestFilterAppResources(t *testing.T) {
|
||||
Namespace: "",
|
||||
Exclude: true,
|
||||
}
|
||||
// apps:ReplicaSet:*
|
||||
includeAllReplicaSetResource = v1alpha1.SyncOperationResource{
|
||||
Group: "apps",
|
||||
Kind: "ReplicaSet",
|
||||
Name: "*",
|
||||
Namespace: "",
|
||||
Exclude: false,
|
||||
}
|
||||
// apps:ReplicaSet:replicaSet-name1
|
||||
includeReplicaSet1Resource = v1alpha1.SyncOperationResource{
|
||||
Group: "apps",
|
||||
@@ -1407,13 +1463,13 @@ func TestFilterAppResources(t *testing.T) {
|
||||
{
|
||||
testName: "Include ReplicaSet replicaSet-name1 resource and exclude all service resources",
|
||||
selectedResources: []*v1alpha1.SyncOperationResource{&excludeAllServiceResources, &includeReplicaSet1Resource},
|
||||
expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &deployment},
|
||||
expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1},
|
||||
},
|
||||
// --resource !apps:ReplicaSet:replicaSet-name2 --resource !*:Service:*
|
||||
{
|
||||
testName: "Exclude ReplicaSet replicaSet-name2 resource and all service resources",
|
||||
selectedResources: []*v1alpha1.SyncOperationResource{&excludeReplicaSet2Resource, &excludeAllServiceResources},
|
||||
expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &service1, &service2, &deployment},
|
||||
expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1, &job, &deployment},
|
||||
},
|
||||
// --resource !apps:ReplicaSet:replicaSet-name2
|
||||
{
|
||||
@@ -1427,6 +1483,12 @@ func TestFilterAppResources(t *testing.T) {
|
||||
selectedResources: []*v1alpha1.SyncOperationResource{&includeReplicaSet1Resource},
|
||||
expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1},
|
||||
},
|
||||
// --resource apps:ReplicaSet:* --resource !apps:ReplicaSet:replicaSet-name2
|
||||
{
|
||||
testName: "Include All ReplicaSet resource and exclude replicaSet-name1 resource",
|
||||
selectedResources: []*v1alpha1.SyncOperationResource{&includeAllReplicaSetResource, &excludeReplicaSet2Resource},
|
||||
expectedResult: []*v1alpha1.SyncOperationResource{&replicaSet1},
|
||||
},
|
||||
// --resource !*:Service:*
|
||||
{
|
||||
testName: "Exclude Service resources",
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/admin"
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
|
||||
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
@@ -53,6 +54,7 @@ func NewAppSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command.AddCommand(NewApplicationSetCreateCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationSetListCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationSetDeleteCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationSetGenerateCommand(clientOpts))
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -208,6 +210,75 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
return command
|
||||
}
|
||||
|
||||
// NewApplicationSetGenerateCommand returns a new instance of an `argocd appset generate` command
|
||||
func NewApplicationSetGenerateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var output string
|
||||
command := &cobra.Command{
|
||||
Use: "generate",
|
||||
Short: "Generate apps of ApplicationSet rendered templates",
|
||||
Example: templates.Examples(`
|
||||
# Generate apps of ApplicationSet rendered templates
|
||||
argocd appset generate <filename or URL> (<filename or URL>...)
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) == 0 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
argocdClient := headless.NewClientOrDie(clientOpts, c)
|
||||
fileUrl := args[0]
|
||||
appsets, err := cmdutil.ConstructApplicationSet(fileUrl)
|
||||
errors.CheckError(err)
|
||||
|
||||
if len(appsets) != 1 {
|
||||
fmt.Printf("Input file must contain one ApplicationSet")
|
||||
os.Exit(1)
|
||||
}
|
||||
appset := appsets[0]
|
||||
if appset.Name == "" {
|
||||
err := fmt.Errorf("Error generating apps for ApplicationSet %s. ApplicationSet does not have Name field set", appset)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
conn, appIf := argocdClient.NewApplicationSetClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
|
||||
req := applicationset.ApplicationSetGenerateRequest{
|
||||
ApplicationSet: appset,
|
||||
}
|
||||
resp, err := appIf.Generate(ctx, &req)
|
||||
errors.CheckError(err)
|
||||
|
||||
var appsList []arogappsetv1.Application
|
||||
for i := range resp.Applications {
|
||||
appsList = append(appsList, *resp.Applications[i])
|
||||
}
|
||||
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
var resources []interface{}
|
||||
for i := range appsList {
|
||||
app := appsList[i]
|
||||
// backfill api version and kind because k8s client always return empty values for these fields
|
||||
app.APIVersion = arogappsetv1.ApplicationSchemaGroupVersionKind.GroupVersion().String()
|
||||
app.Kind = arogappsetv1.ApplicationSchemaGroupVersionKind.Kind
|
||||
resources = append(resources, app)
|
||||
}
|
||||
|
||||
cobra.CheckErr(admin.PrintResources(output, os.Stdout, resources...))
|
||||
case "wide", "":
|
||||
printApplicationTable(appsList, &output)
|
||||
default:
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
}
|
||||
},
|
||||
}
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
|
||||
return command
|
||||
}
|
||||
|
||||
// NewApplicationSetListCommand returns a new instance of an `argocd appset list` command
|
||||
func NewApplicationSetListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
|
||||
@@ -34,6 +34,10 @@ const (
|
||||
clusterFieldName = "name"
|
||||
// cluster field is 'namespaces'
|
||||
clusterFieldNamespaces = "namespaces"
|
||||
// cluster field is 'labels'
|
||||
clusterFieldLabel = "labels"
|
||||
// cluster field is 'annotations'
|
||||
clusterFieldAnnotation = "annotations"
|
||||
// indicates managing all namespaces
|
||||
allNamespaces = "*"
|
||||
)
|
||||
@@ -220,6 +224,8 @@ func NewClusterSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
var (
|
||||
clusterOptions cmdutil.ClusterOptions
|
||||
clusterName string
|
||||
labels []string
|
||||
annotations []string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "set NAME",
|
||||
@@ -238,17 +244,25 @@ func NewClusterSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie()
|
||||
defer io.Close(conn)
|
||||
// checks the fields that needs to be updated
|
||||
updatedFields := checkFieldsToUpdate(clusterOptions)
|
||||
updatedFields := checkFieldsToUpdate(clusterOptions, labels, annotations)
|
||||
namespaces := clusterOptions.Namespaces
|
||||
// check if all namespaces have to be considered
|
||||
if len(namespaces) == 1 && strings.EqualFold(namespaces[0], allNamespaces) {
|
||||
namespaces[0] = ""
|
||||
}
|
||||
// parse the labels you're receiving from the label flag
|
||||
labelsMap, err := label.Parse(labels)
|
||||
errors.CheckError(err)
|
||||
// parse the annotations you're receiving from the annotation flag
|
||||
annotationsMap, err := label.Parse(annotations)
|
||||
errors.CheckError(err)
|
||||
if updatedFields != nil {
|
||||
clusterUpdateRequest := clusterpkg.ClusterUpdateRequest{
|
||||
Cluster: &argoappv1.Cluster{
|
||||
Name: clusterOptions.Name,
|
||||
Namespaces: namespaces,
|
||||
Name: clusterOptions.Name,
|
||||
Namespaces: namespaces,
|
||||
Labels: labelsMap,
|
||||
Annotations: annotationsMap,
|
||||
},
|
||||
UpdatedFields: updatedFields,
|
||||
Id: &clusterpkg.ClusterID{
|
||||
@@ -266,11 +280,13 @@ func NewClusterSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
}
|
||||
command.Flags().StringVar(&clusterOptions.Name, "name", "", "Overwrite the cluster name")
|
||||
command.Flags().StringArrayVar(&clusterOptions.Namespaces, "namespace", nil, "List of namespaces which are allowed to manage. Specify '*' to manage all namespaces")
|
||||
command.Flags().StringArrayVar(&labels, "label", nil, "Set metadata labels (e.g. --label key=value)")
|
||||
command.Flags().StringArrayVar(&annotations, "annotation", nil, "Set metadata annotations (e.g. --annotation key=value)")
|
||||
return command
|
||||
}
|
||||
|
||||
// checkFieldsToUpdate returns the fields that needs to be updated
|
||||
func checkFieldsToUpdate(clusterOptions cmdutil.ClusterOptions) []string {
|
||||
func checkFieldsToUpdate(clusterOptions cmdutil.ClusterOptions, labels []string, annotations []string) []string {
|
||||
var updatedFields []string
|
||||
if clusterOptions.Name != "" {
|
||||
updatedFields = append(updatedFields, clusterFieldName)
|
||||
@@ -278,6 +294,12 @@ func checkFieldsToUpdate(clusterOptions cmdutil.ClusterOptions) []string {
|
||||
if clusterOptions.Namespaces != nil {
|
||||
updatedFields = append(updatedFields, clusterFieldNamespaces)
|
||||
}
|
||||
if labels != nil {
|
||||
updatedFields = append(updatedFields, clusterFieldLabel)
|
||||
}
|
||||
if annotations != nil {
|
||||
updatedFields = append(updatedFields, clusterFieldAnnotation)
|
||||
}
|
||||
return updatedFields
|
||||
}
|
||||
|
||||
@@ -341,6 +363,7 @@ func printClusterDetails(clusters []argoappv1.Cluster) {
|
||||
fmt.Printf("Cluster information\n\n")
|
||||
fmt.Printf(" Server URL: %s\n", cluster.Server)
|
||||
fmt.Printf(" Server Name: %s\n", strWithDefault(cluster.Name, "-"))
|
||||
// nolint:staticcheck
|
||||
fmt.Printf(" Server Version: %s\n", cluster.ServerVersion)
|
||||
fmt.Printf(" Namespaces: %s\n", formatNamespaces(cluster))
|
||||
fmt.Printf("\nTLS configuration\n\n")
|
||||
@@ -433,6 +456,7 @@ func printClusterTable(clusters []argoappv1.Cluster) {
|
||||
if len(c.Namespaces) > 0 {
|
||||
server = fmt.Sprintf("%s (%d namespaces)", c.Server, len(c.Namespaces))
|
||||
}
|
||||
// nolint:staticcheck
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", server, c.Name, c.ServerVersion, c.ConnectionState.Status, c.ConnectionState.Message, c.Project)
|
||||
}
|
||||
_ = w.Flush()
|
||||
|
||||
@@ -81,14 +81,14 @@ func Test_PrintResource(t *testing.T) {
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectYamlSingle, str)
|
||||
assert.YAMLEq(t, expectYamlSingle, str)
|
||||
|
||||
str, err = captureOutput(func() error {
|
||||
err := PrintResource(testResource, "json")
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectJsonSingle, str)
|
||||
assert.JSONEq(t, expectJsonSingle, str)
|
||||
|
||||
err = PrintResource(testResource, "unknown")
|
||||
require.Error(t, err)
|
||||
@@ -116,28 +116,28 @@ func Test_PrintResourceList(t *testing.T) {
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectYamlList, str)
|
||||
assert.YAMLEq(t, expectYamlList, str)
|
||||
|
||||
str, err = captureOutput(func() error {
|
||||
err := PrintResourceList(testResource, "json", false)
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectJsonList, str)
|
||||
assert.JSONEq(t, expectJsonList, str)
|
||||
|
||||
str, err = captureOutput(func() error {
|
||||
err := PrintResourceList(testResource2, "yaml", true)
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectYamlSingle, str)
|
||||
assert.YAMLEq(t, expectYamlSingle, str)
|
||||
|
||||
str, err = captureOutput(func() error {
|
||||
err := PrintResourceList(testResource2, "json", true)
|
||||
return err
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectJsonSingle, str)
|
||||
assert.JSONEq(t, expectJsonSingle, str)
|
||||
|
||||
err = PrintResourceList(testResource, "unknown", false)
|
||||
require.Error(t, err)
|
||||
|
||||
@@ -14,8 +14,10 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
runtimeUtil "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
cache2 "k8s.io/client-go/tools/cache"
|
||||
@@ -48,6 +50,7 @@ type forwardCacheClient struct {
|
||||
err error
|
||||
redisHaProxyName string
|
||||
redisName string
|
||||
redisPassword string
|
||||
}
|
||||
|
||||
func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error) error {
|
||||
@@ -64,7 +67,7 @@ func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error)
|
||||
return
|
||||
}
|
||||
|
||||
redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort)})
|
||||
redisClient := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("localhost:%d", redisPort), Password: c.redisPassword})
|
||||
c.client = cache.NewRedisCache(redisClient, time.Hour, c.compression)
|
||||
})
|
||||
if c.err != nil {
|
||||
@@ -126,7 +129,7 @@ func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.R
|
||||
}
|
||||
repoServerName := c.repoServerName
|
||||
repoServererviceLabelSelector := common.LabelKeyComponentRepoServer + "=" + common.LabelValueComponentRepoServer
|
||||
repoServerServices, err := c.kubeClientset.CoreV1().Services(c.namespace).List(context.Background(), v1.ListOptions{LabelSelector: repoServererviceLabelSelector})
|
||||
repoServerServices, err := c.kubeClientset.CoreV1().Services(c.namespace).List(context.Background(), metaV1.ListOptions{LabelSelector: repoServererviceLabelSelector})
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return
|
||||
@@ -174,7 +177,7 @@ func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
|
||||
//
|
||||
// If the clientOpts enables core mode, but the local config does not have core mode enabled, this function will
|
||||
// not start the local server.
|
||||
func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, compression cache.RedisCompressionType, clientConfig clientcmd.ClientConfig) error {
|
||||
func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions, ctxStr string, port *int, address *string, clientConfig clientcmd.ClientConfig) error {
|
||||
if clientConfig == nil {
|
||||
flags := pflag.NewFlagSet("tmp", pflag.ContinueOnError)
|
||||
clientConfig = cli.AddKubectlFlagsToSet(flags)
|
||||
@@ -201,7 +204,7 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
|
||||
}
|
||||
|
||||
// get rid of logging error handler
|
||||
runtime.ErrorHandlers = runtime.ErrorHandlers[1:]
|
||||
runtimeUtil.ErrorHandlers = runtimeUtil.ErrorHandlers[1:]
|
||||
cli.SetLogLevel(log.ErrorLevel.String())
|
||||
log.SetLevel(log.ErrorLevel)
|
||||
os.Setenv(v1alpha1.EnvVarFakeInClusterConfig, "true")
|
||||
@@ -236,7 +239,18 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
|
||||
return fmt.Errorf("error creating kubernetes dynamic clientset: %w", err)
|
||||
}
|
||||
|
||||
controllerClientset, err := client.New(restConfig, client.Options{})
|
||||
scheme := runtime.NewScheme()
|
||||
err = v1alpha1.AddToScheme(scheme)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error adding argo resources to scheme: %w", err)
|
||||
}
|
||||
err = corev1.AddToScheme(scheme)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error adding corev1 resources to scheme: %w", err)
|
||||
}
|
||||
controllerClientset, err := client.New(restConfig, client.Options{
|
||||
Scheme: scheme,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating kubernetes controller clientset: %w", err)
|
||||
}
|
||||
@@ -251,12 +265,12 @@ func MaybeStartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOpti
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running miniredis: %w", err)
|
||||
}
|
||||
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression, redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName}), time.Hour)
|
||||
|
||||
redisOptions := &redis.Options{Addr: mr.Addr()}
|
||||
if err = common.SetOptionalRedisPasswordFromKubeConfig(ctx, kubeClientset, namespace, redisOptions); err != nil {
|
||||
log.Warnf("Failed to fetch & set redis password for namespace %s: %v", namespace, err)
|
||||
}
|
||||
|
||||
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: cache.RedisCompressionType(clientOpts.RedisCompression), redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName, redisPassword: redisOptions.Password}), time.Hour)
|
||||
srv := server.NewServer(ctx, server.ArgoCDServerOpts{
|
||||
EnableGZip: false,
|
||||
Namespace: namespace,
|
||||
@@ -307,7 +321,7 @@ func NewClientOrDie(opts *apiclient.ClientOptions, c *cobra.Command) apiclient.C
|
||||
ctxStr := initialize.RetrieveContextIfChanged(c.Flag("context"))
|
||||
// If we're in core mode, start the API server on the fly and configure the client `opts` to use it.
|
||||
// If we're not in core mode, this function call will do nothing.
|
||||
err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, cache.RedisCompressionNone, nil)
|
||||
err := MaybeStartLocalServer(ctx, opts, ctxStr, nil, nil, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
@@ -80,6 +81,8 @@ func NewProjectCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command.AddCommand(NewProjectRemoveOrphanedIgnoreCommand(clientOpts))
|
||||
command.AddCommand(NewProjectAddSourceNamespace(clientOpts))
|
||||
command.AddCommand(NewProjectRemoveSourceNamespace(clientOpts))
|
||||
command.AddCommand(NewProjectAddDestinationServiceAccountCommand(clientOpts))
|
||||
command.AddCommand(NewProjectRemoveDestinationServiceAccountCommand(clientOpts))
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -799,7 +802,7 @@ func printProjectNames(projects []v1alpha1.AppProject) {
|
||||
// Print table of project info
|
||||
func printProjectTable(projects []v1alpha1.AppProject) {
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\tSIGNATURE-KEYS\tORPHANED-RESOURCES\n")
|
||||
fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\tSIGNATURE-KEYS\tORPHANED-RESOURCES\tDESTINATION-SERVICE-ACCOUNTS\n")
|
||||
for _, p := range projects {
|
||||
printProjectLine(w, &p)
|
||||
}
|
||||
@@ -855,7 +858,7 @@ func formatOrphanedResources(p *v1alpha1.AppProject) string {
|
||||
}
|
||||
|
||||
func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
|
||||
var destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys string
|
||||
var destinations, destinationServiceAccounts, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys string
|
||||
switch len(p.Spec.Destinations) {
|
||||
case 0:
|
||||
destinations = "<none>"
|
||||
@@ -864,6 +867,14 @@ func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
|
||||
default:
|
||||
destinations = fmt.Sprintf("%d destinations", len(p.Spec.Destinations))
|
||||
}
|
||||
switch len(p.Spec.DestinationServiceAccounts) {
|
||||
case 0:
|
||||
destinationServiceAccounts = "<none>"
|
||||
case 1:
|
||||
destinationServiceAccounts = fmt.Sprintf("%s,%s,%s", p.Spec.DestinationServiceAccounts[0].Server, p.Spec.DestinationServiceAccounts[0].Namespace, p.Spec.DestinationServiceAccounts[0].DefaultServiceAccount)
|
||||
default:
|
||||
destinationServiceAccounts = fmt.Sprintf("%d destinationServiceAccounts", len(p.Spec.DestinationServiceAccounts))
|
||||
}
|
||||
switch len(p.Spec.SourceRepos) {
|
||||
case 0:
|
||||
sourceRepos = "<none>"
|
||||
@@ -892,7 +903,7 @@ func printProjectLine(w io.Writer, p *v1alpha1.AppProject) {
|
||||
default:
|
||||
signatureKeys = fmt.Sprintf("%d key(s)", len(p.Spec.SignatureKeys))
|
||||
}
|
||||
fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys, formatOrphanedResources(p))
|
||||
fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", p.Name, p.Spec.Description, destinations, sourceRepos, clusterWhitelist, namespaceBlacklist, signatureKeys, formatOrphanedResources(p), destinationServiceAccounts)
|
||||
}
|
||||
|
||||
func printProject(p *v1alpha1.AppProject, scopedRepositories []*v1alpha1.Repository, scopedClusters []*v1alpha1.Cluster) {
|
||||
@@ -1082,3 +1093,122 @@ func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
// NewProjectAddDestinationServiceAccountCommand returns a new instance of an `argocd proj add-destination-service-account` command
|
||||
func NewProjectAddDestinationServiceAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var serviceAccountNamespace string
|
||||
|
||||
buildApplicationDestinationServiceAccount := func(destination string, namespace string, serviceAccount string, serviceAccountNamespace string) v1alpha1.ApplicationDestinationServiceAccount {
|
||||
if serviceAccountNamespace != "" {
|
||||
return v1alpha1.ApplicationDestinationServiceAccount{
|
||||
Server: destination,
|
||||
Namespace: namespace,
|
||||
DefaultServiceAccount: fmt.Sprintf("%s:%s", serviceAccountNamespace, serviceAccount),
|
||||
}
|
||||
} else {
|
||||
return v1alpha1.ApplicationDestinationServiceAccount{
|
||||
Server: destination,
|
||||
Namespace: namespace,
|
||||
DefaultServiceAccount: serviceAccount,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
command := &cobra.Command{
|
||||
Use: "add-destination-service-account PROJECT SERVER NAMESPACE SERVICE_ACCOUNT",
|
||||
Short: "Add project destination's default service account",
|
||||
Example: templates.Examples(`
|
||||
# Add project destination service account (SERVICE_ACCOUNT) for a server URL (SERVER) in the specified namespace (NAMESPACE) on the project with name PROJECT
|
||||
argocd proj add-destination-service-account PROJECT SERVER NAMESPACE SERVICE_ACCOUNT
|
||||
|
||||
# Add project destination service account (SERVICE_ACCOUNT) from a different namespace
|
||||
argocd proj add-destination PROJECT SERVER NAMESPACE SERVICE_ACCOUNT --service-account-namespace <service_account_namespace>
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) != 4 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
projName := args[0]
|
||||
server := args[1]
|
||||
namespace := args[2]
|
||||
serviceAccount := args[3]
|
||||
|
||||
if strings.Contains(serviceAccountNamespace, "*") {
|
||||
log.Fatal("service-account-namespace for DestinationServiceAccount must not contain wildcards")
|
||||
}
|
||||
|
||||
if strings.Contains(serviceAccount, "*") {
|
||||
log.Fatal("ServiceAccount for DestinationServiceAccount must not contain wildcards")
|
||||
}
|
||||
|
||||
destinationServiceAccount := buildApplicationDestinationServiceAccount(server, namespace, serviceAccount, serviceAccountNamespace)
|
||||
conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
|
||||
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName})
|
||||
errors.CheckError(err)
|
||||
|
||||
for _, dest := range proj.Spec.DestinationServiceAccounts {
|
||||
dstServerExist := destinationServiceAccount.Server != "" && dest.Server == destinationServiceAccount.Server
|
||||
dstServiceAccountExist := destinationServiceAccount.DefaultServiceAccount != "" && dest.DefaultServiceAccount == destinationServiceAccount.DefaultServiceAccount
|
||||
if dest.Namespace == destinationServiceAccount.Namespace && dstServerExist && dstServiceAccountExist {
|
||||
log.Fatal("Specified destination service account is already defined in project")
|
||||
}
|
||||
}
|
||||
proj.Spec.DestinationServiceAccounts = append(proj.Spec.DestinationServiceAccounts, destinationServiceAccount)
|
||||
_, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj})
|
||||
errors.CheckError(err)
|
||||
},
|
||||
}
|
||||
command.Flags().StringVar(&serviceAccountNamespace, "service-account-namespace", "", "Use service-account-namespace as namespace where the service account is present")
|
||||
return command
|
||||
}
|
||||
|
||||
// NewProjectRemoveDestinationCommand returns a new instance of an `argocd proj remove-destination-service-account` command
|
||||
func NewProjectRemoveDestinationServiceAccountCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command := &cobra.Command{
|
||||
Use: "remove-destination-service-account PROJECT SERVER NAMESPACE SERVICE_ACCOUNT",
|
||||
Short: "Remove default destination service account from the project",
|
||||
Example: templates.Examples(`
|
||||
# Remove the destination service account (SERVICE_ACCOUNT) from the specified destination (SERVER and NAMESPACE combination) on the project with name PROJECT
|
||||
argocd proj remove-destination-service-account PROJECT SERVER NAMESPACE SERVICE_ACCOUNT
|
||||
`),
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) != 4 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
projName := args[0]
|
||||
server := args[1]
|
||||
namespace := args[2]
|
||||
serviceAccount := args[3]
|
||||
conn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
|
||||
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: projName})
|
||||
errors.CheckError(err)
|
||||
|
||||
originalLength := len(proj.Spec.DestinationServiceAccounts)
|
||||
proj.Spec.DestinationServiceAccounts = slices.DeleteFunc(proj.Spec.DestinationServiceAccounts,
|
||||
func(destServiceAccount v1alpha1.ApplicationDestinationServiceAccount) bool {
|
||||
return destServiceAccount.Namespace == namespace &&
|
||||
destServiceAccount.Server == server &&
|
||||
destServiceAccount.DefaultServiceAccount == serviceAccount
|
||||
},
|
||||
)
|
||||
if originalLength != len(proj.Spec.DestinationServiceAccounts) {
|
||||
_, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj})
|
||||
errors.CheckError(err)
|
||||
} else {
|
||||
log.Fatal("Specified destination service account does not exist in project")
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -352,9 +352,10 @@ func printSyncWindows(proj *v1alpha1.AppProject) {
|
||||
fmt.Fprintf(w, fmtStr, headers...)
|
||||
if proj.Spec.SyncWindows.HasWindows() {
|
||||
for i, window := range proj.Spec.SyncWindows {
|
||||
isActive, _ := window.Active()
|
||||
vals := []interface{}{
|
||||
strconv.Itoa(i),
|
||||
formatBoolOutput(window.Active()),
|
||||
formatBoolOutput(isActive),
|
||||
window.Kind,
|
||||
window.Schedule,
|
||||
window.Duration,
|
||||
|
||||
@@ -178,6 +178,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
repoOpts.Repo.GithubAppInstallationId = repoOpts.GithubAppInstallationId
|
||||
repoOpts.Repo.GitHubAppEnterpriseBaseURL = repoOpts.GitHubAppEnterpriseBaseURL
|
||||
repoOpts.Repo.Proxy = repoOpts.Proxy
|
||||
repoOpts.Repo.NoProxy = repoOpts.NoProxy
|
||||
repoOpts.Repo.ForceHttpBasicAuth = repoOpts.ForceHttpBasicAuth
|
||||
|
||||
if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" {
|
||||
|
||||
@@ -187,6 +187,7 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
command.Flags().StringVar(&repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"")
|
||||
command.Flags().StringVar(&gcpServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
|
||||
command.Flags().BoolVar(&repo.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force basic auth when connecting via HTTP")
|
||||
command.Flags().StringVar(&repo.Proxy, "proxy-url", "", "If provided, this URL will be used to connect via proxy")
|
||||
return command
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ package commands
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/cache"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
@@ -80,12 +82,15 @@ func NewCommand() *cobra.Command {
|
||||
command.PersistentFlags().StringVar(&clientOpts.PortForwardNamespace, "port-forward-namespace", config.GetFlag("port-forward-namespace", ""), "Namespace name which should be used for port forwarding")
|
||||
command.PersistentFlags().IntVar(&clientOpts.HttpRetryMax, "http-retry-max", config.GetIntFlag("http-retry-max", 0), "Maximum number of retries to establish http connection to Argo CD server")
|
||||
command.PersistentFlags().BoolVar(&clientOpts.Core, "core", config.GetBoolFlag("core"), "If set to true then CLI talks directly to Kubernetes instead of talking to Argo CD API server")
|
||||
command.PersistentFlags().StringVar(&clientOpts.Context, "argocd-context", "", "The name of the Argo-CD server context to use")
|
||||
command.PersistentFlags().StringVar(&clientOpts.ServerName, "server-name", env.StringFromEnv(common.EnvServerName, common.DefaultServerName), fmt.Sprintf("Name of the Argo CD API server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvServerName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.AppControllerName, "controller-name", env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName), fmt.Sprintf("Name of the Argo CD Application controller; set this or the %s environment variable when the controller's name label differs from the default, for example when installing via the Helm chart", common.EnvAppControllerName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.RedisHaProxyName, "redis-haproxy-name", env.StringFromEnv(common.EnvRedisHaProxyName, common.DefaultRedisHaProxyName), fmt.Sprintf("Name of the Redis HA Proxy; set this or the %s environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisHaProxyName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.RedisName, "redis-name", env.StringFromEnv(common.EnvRedisName, common.DefaultRedisName), fmt.Sprintf("Name of the Redis deployment; set this or the %s environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.RepoServerName, "repo-server-name", env.StringFromEnv(common.EnvRepoServerName, common.DefaultRepoServerName), fmt.Sprintf("Name of the Argo CD Repo server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvRepoServerName))
|
||||
|
||||
command.PersistentFlags().StringVar(&clientOpts.RedisCompression, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(cache.RedisCompressionGZip)), "Enable this if the application controller is configured with redis compression enabled. (possible values: gzip, none)")
|
||||
|
||||
clientOpts.KubeOverrides = &clientcmd.ConfigOverrides{}
|
||||
command.PersistentFlags().StringVar(&clientOpts.KubeOverrides.CurrentContext, "kube-context", "", "Directs the command to the given kube-context")
|
||||
|
||||
|
||||
10
cmd/main.go
10
cmd/main.go
@@ -4,6 +4,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
appcontroller "github.com/argoproj/argo-cd/v2/cmd/argocd-application-controller/commands"
|
||||
@@ -29,9 +31,12 @@ func main() {
|
||||
if val := os.Getenv(binaryNameEnv); val != "" {
|
||||
binaryName = val
|
||||
}
|
||||
|
||||
isCLI := false
|
||||
switch binaryName {
|
||||
case "argocd", "argocd-linux-amd64", "argocd-darwin-amd64", "argocd-windows-amd64.exe":
|
||||
command = cli.NewCommand()
|
||||
isCLI = true
|
||||
case "argocd-server":
|
||||
command = apiserver.NewCommand()
|
||||
case "argocd-application-controller":
|
||||
@@ -40,19 +45,24 @@ func main() {
|
||||
command = reposerver.NewCommand()
|
||||
case "argocd-cmp-server":
|
||||
command = cmpserver.NewCommand()
|
||||
isCLI = true
|
||||
case "argocd-dex":
|
||||
command = dex.NewCommand()
|
||||
case "argocd-notifications":
|
||||
command = notification.NewCommand()
|
||||
case "argocd-git-ask-pass":
|
||||
command = gitaskpass.NewCommand()
|
||||
isCLI = true
|
||||
case "argocd-applicationset-controller":
|
||||
command = applicationset.NewCommand()
|
||||
case "argocd-k8s-auth":
|
||||
command = k8sauth.NewCommand()
|
||||
isCLI = true
|
||||
default:
|
||||
command = cli.NewCommand()
|
||||
isCLI = true
|
||||
}
|
||||
util.SetAutoMaxProcs(isCLI)
|
||||
|
||||
if err := command.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
@@ -88,6 +90,19 @@ type AppOptions struct {
|
||||
ref string
|
||||
}
|
||||
|
||||
// SetAutoMaxProcs sets the GOMAXPROCS value based on the binary name.
|
||||
// It suppresses logs for CLI binaries and logs the setting for services.
|
||||
func SetAutoMaxProcs(isCLI bool) {
|
||||
if isCLI {
|
||||
_, _ = maxprocs.Set() // Intentionally ignore errors for CLI binaries
|
||||
} else {
|
||||
_, err := maxprocs.Set(maxprocs.Logger(log.Infof))
|
||||
if err != nil {
|
||||
log.Errorf("Error setting GOMAXPROCS: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func AddAppFlags(command *cobra.Command, opts *AppOptions) {
|
||||
command.Flags().StringVar(&opts.repoURL, "repo", "", "Repository URL, ignored if a file is set")
|
||||
command.Flags().StringVar(&opts.appPath, "path", "", "Path in repository to the app directory, ignored if a file is set")
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -529,3 +530,27 @@ func TestFilterResources(t *testing.T) {
|
||||
assert.Nil(t, filteredResources)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetAutoMaxProcs(t *testing.T) {
|
||||
t.Run("CLI mode ignores errors", func(t *testing.T) {
|
||||
logBuffer := &bytes.Buffer{}
|
||||
oldLogger := log.Default()
|
||||
log.SetOutput(logBuffer)
|
||||
defer log.SetOutput(oldLogger.Writer())
|
||||
|
||||
SetAutoMaxProcs(true)
|
||||
|
||||
assert.Empty(t, logBuffer.String(), "Expected no log output when isCLI is true")
|
||||
})
|
||||
|
||||
t.Run("Non-CLI mode logs error on failure", func(t *testing.T) {
|
||||
logBuffer := &bytes.Buffer{}
|
||||
oldLogger := log.Default()
|
||||
log.SetOutput(logBuffer)
|
||||
defer log.SetOutput(oldLogger.Writer())
|
||||
|
||||
SetAutoMaxProcs(false)
|
||||
|
||||
assert.NotContains(t, logBuffer.String(), "Error setting GOMAXPROCS", "Unexpected log output detected")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -20,11 +20,12 @@ import (
|
||||
)
|
||||
|
||||
type ProjectOpts struct {
|
||||
Description string
|
||||
destinations []string
|
||||
Sources []string
|
||||
SignatureKeys []string
|
||||
SourceNamespaces []string
|
||||
Description string
|
||||
destinations []string
|
||||
destinationServiceAccounts []string
|
||||
Sources []string
|
||||
SignatureKeys []string
|
||||
SourceNamespaces []string
|
||||
|
||||
orphanedResourcesEnabled bool
|
||||
orphanedResourcesWarn bool
|
||||
@@ -47,6 +48,8 @@ func AddProjFlags(command *cobra.Command, opts *ProjectOpts) {
|
||||
command.Flags().StringArrayVar(&opts.allowedNamespacedResources, "allow-namespaced-resource", []string{}, "List of allowed namespaced resources")
|
||||
command.Flags().StringArrayVar(&opts.deniedNamespacedResources, "deny-namespaced-resource", []string{}, "List of denied namespaced resources")
|
||||
command.Flags().StringSliceVar(&opts.SourceNamespaces, "source-namespaces", []string{}, "List of source namespaces for applications")
|
||||
command.Flags().StringArrayVar(&opts.destinationServiceAccounts, "dest-service-accounts", []string{},
|
||||
"Destination server, namespace and target service account (e.g. https://192.168.99.100:8443,default,default-sa)")
|
||||
}
|
||||
|
||||
func getGroupKindList(values []string) []v1.GroupKind {
|
||||
@@ -93,6 +96,23 @@ func (opts *ProjectOpts) GetDestinations() []v1alpha1.ApplicationDestination {
|
||||
return destinations
|
||||
}
|
||||
|
||||
func (opts *ProjectOpts) GetDestinationServiceAccounts() []v1alpha1.ApplicationDestinationServiceAccount {
|
||||
destinationServiceAccounts := make([]v1alpha1.ApplicationDestinationServiceAccount, 0)
|
||||
for _, destStr := range opts.destinationServiceAccounts {
|
||||
parts := strings.Split(destStr, ",")
|
||||
if len(parts) != 3 {
|
||||
log.Fatalf("Expected destination service account of the form: server,namespace, defaultServiceAccount. Received: %s", destStr)
|
||||
} else {
|
||||
destinationServiceAccounts = append(destinationServiceAccounts, v1alpha1.ApplicationDestinationServiceAccount{
|
||||
Server: parts[0],
|
||||
Namespace: parts[1],
|
||||
DefaultServiceAccount: parts[2],
|
||||
})
|
||||
}
|
||||
}
|
||||
return destinationServiceAccounts
|
||||
}
|
||||
|
||||
// GetSignatureKeys TODO: Get configured keys and emit warning when a key is specified that is not configured
|
||||
func (opts *ProjectOpts) GetSignatureKeys() []v1alpha1.SignatureKey {
|
||||
signatureKeys := make([]v1alpha1.SignatureKey, 0)
|
||||
@@ -166,6 +186,8 @@ func SetProjSpecOptions(flags *pflag.FlagSet, spec *v1alpha1.AppProjectSpec, pro
|
||||
spec.NamespaceResourceBlacklist = projOpts.GetDeniedNamespacedResources()
|
||||
case "source-namespaces":
|
||||
spec.SourceNamespaces = projOpts.GetSourceNamespaces()
|
||||
case "dest-service-accounts":
|
||||
spec.DestinationServiceAccounts = projOpts.GetDestinationServiceAccounts()
|
||||
}
|
||||
})
|
||||
if flags.Changed("orphaned-resources") || flags.Changed("orphaned-resources-warn") {
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
func TestProjectOpts_ResourceLists(t *testing.T) {
|
||||
@@ -22,3 +24,27 @@ func TestProjectOpts_ResourceLists(t *testing.T) {
|
||||
[]v1.GroupKind{{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}}, opts.GetDeniedClusterResources(),
|
||||
)
|
||||
}
|
||||
|
||||
func TestProjectOpts_GetDestinationServiceAccounts(t *testing.T) {
|
||||
opts := ProjectOpts{
|
||||
destinationServiceAccounts: []string{
|
||||
"https://192.168.99.100:8443,test-ns,test-sa",
|
||||
"https://kubernetes.default.svc.local:6443,guestbook,guestbook-sa",
|
||||
},
|
||||
}
|
||||
|
||||
assert.ElementsMatch(t,
|
||||
[]v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://192.168.99.100:8443",
|
||||
Namespace: "test-ns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.default.svc.local:6443",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
}, opts.GetDestinationServiceAccounts(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ type RepoOptions struct {
|
||||
GithubAppPrivateKeyPath string
|
||||
GitHubAppEnterpriseBaseURL string
|
||||
Proxy string
|
||||
NoProxy string
|
||||
GCPServiceAccountKeyPath string
|
||||
ForceHttpBasicAuth bool
|
||||
}
|
||||
@@ -44,6 +45,7 @@ func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
|
||||
command.Flags().StringVar(&opts.GithubAppPrivateKeyPath, "github-app-private-key-path", "", "private key of the GitHub Application")
|
||||
command.Flags().StringVar(&opts.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3")
|
||||
command.Flags().StringVar(&opts.Proxy, "proxy", "", "use proxy to access repository")
|
||||
command.Flags().StringVar(&opts.Proxy, "no-proxy", "", "don't access these targets via proxy")
|
||||
command.Flags().StringVar(&opts.GCPServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
|
||||
command.Flags().BoolVar(&opts.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force use of basic auth when connecting repository via HTTP")
|
||||
}
|
||||
|
||||
@@ -46,6 +46,7 @@ const (
|
||||
ArgoCDGPGKeysConfigMapName = "argocd-gpg-keys-cm"
|
||||
// ArgoCDAppControllerShardConfigMapName contains the application controller to shard mapping
|
||||
ArgoCDAppControllerShardConfigMapName = "argocd-app-controller-shard-cm"
|
||||
ArgoCDCmdParamsConfigMapName = "argocd-cmd-params-cm"
|
||||
)
|
||||
|
||||
// Some default configurables
|
||||
@@ -177,6 +178,7 @@ const (
|
||||
|
||||
// AnnotationKeyAppInstance is the Argo CD application name is used as the instance name
|
||||
AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id"
|
||||
AnnotationInstallationID = "argocd.argoproj.io/installation-id"
|
||||
|
||||
// AnnotationCompareOptions is a comma-separated list of options for comparison
|
||||
AnnotationCompareOptions = "argocd.argoproj.io/compare-options"
|
||||
@@ -221,7 +223,7 @@ const (
|
||||
EnvGitRetryMaxDuration = "ARGOCD_GIT_RETRY_MAX_DURATION"
|
||||
// EnvGitRetryDuration specifies duration of git remote operation retry
|
||||
EnvGitRetryDuration = "ARGOCD_GIT_RETRY_DURATION"
|
||||
// EnvGitRetryFactor specifies fator of git remote operation retry
|
||||
// EnvGitRetryFactor specifies factor of git remote operation retry
|
||||
EnvGitRetryFactor = "ARGOCD_GIT_RETRY_FACTOR"
|
||||
// EnvGitSubmoduleEnabled overrides git submodule support, true by default
|
||||
EnvGitSubmoduleEnabled = "ARGOCD_GIT_MODULES_ENABLED"
|
||||
|
||||
@@ -116,11 +116,11 @@ type ApplicationController struct {
|
||||
applicationClientset appclientset.Interface
|
||||
auditLogger *argo.AuditLogger
|
||||
// queue contains app namespace/name
|
||||
appRefreshQueue workqueue.RateLimitingInterface
|
||||
appRefreshQueue workqueue.TypedRateLimitingInterface[string]
|
||||
// queue contains app namespace/name/comparisonType and used to request app refresh with the predefined comparison type
|
||||
appComparisonTypeRefreshQueue workqueue.RateLimitingInterface
|
||||
appOperationQueue workqueue.RateLimitingInterface
|
||||
projectRefreshQueue workqueue.RateLimitingInterface
|
||||
appComparisonTypeRefreshQueue workqueue.TypedRateLimitingInterface[string]
|
||||
appOperationQueue workqueue.TypedRateLimitingInterface[string]
|
||||
projectRefreshQueue workqueue.TypedRateLimitingInterface[string]
|
||||
appInformer cache.SharedIndexInformer
|
||||
appLister applisters.ApplicationLister
|
||||
projInformer cache.SharedIndexInformer
|
||||
@@ -130,6 +130,7 @@ type ApplicationController struct {
|
||||
statusHardRefreshTimeout time.Duration
|
||||
statusRefreshJitter time.Duration
|
||||
selfHealTimeout time.Duration
|
||||
selfHealBackOff *wait.Backoff
|
||||
repoClientset apiclient.Clientset
|
||||
db db.ArgoDB
|
||||
settingsMgr *settings_util.SettingsManager
|
||||
@@ -160,10 +161,12 @@ func NewApplicationController(
|
||||
appHardResyncPeriod time.Duration,
|
||||
appResyncJitter time.Duration,
|
||||
selfHealTimeout time.Duration,
|
||||
selfHealBackoff *wait.Backoff,
|
||||
repoErrorGracePeriod time.Duration,
|
||||
metricsPort int,
|
||||
metricsCacheExpiration time.Duration,
|
||||
metricsApplicationLabels []string,
|
||||
metricsApplicationConditions []string,
|
||||
kubectlParallelismLimit int64,
|
||||
persistResourceHealth bool,
|
||||
clusterSharding sharding.ClusterShardingCache,
|
||||
@@ -172,6 +175,7 @@ func NewApplicationController(
|
||||
serverSideDiff bool,
|
||||
dynamicClusterDistributionEnabled bool,
|
||||
ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
|
||||
enableK8sEvent []string,
|
||||
) (*ApplicationController, error) {
|
||||
log.Infof("appResyncPeriod=%v, appHardResyncPeriod=%v, appResyncJitter=%v", appResyncPeriod, appHardResyncPeriod, appResyncJitter)
|
||||
db := db.NewDB(namespace, settingsMgr, kubeClientset)
|
||||
@@ -186,19 +190,20 @@ func NewApplicationController(
|
||||
kubectl: kubectl,
|
||||
applicationClientset: applicationClientset,
|
||||
repoClientset: repoClientset,
|
||||
appRefreshQueue: workqueue.NewRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.RateLimitingQueueConfig{Name: "app_reconciliation_queue"}),
|
||||
appOperationQueue: workqueue.NewRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.RateLimitingQueueConfig{Name: "app_operation_processing_queue"}),
|
||||
projectRefreshQueue: workqueue.NewRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.RateLimitingQueueConfig{Name: "project_reconciliation_queue"}),
|
||||
appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
|
||||
appRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_reconciliation_queue"}),
|
||||
appOperationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_operation_processing_queue"}),
|
||||
projectRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "project_reconciliation_queue"}),
|
||||
appComparisonTypeRefreshQueue: workqueue.NewTypedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter(rateLimiterConfig)),
|
||||
db: db,
|
||||
statusRefreshTimeout: appResyncPeriod,
|
||||
statusHardRefreshTimeout: appHardResyncPeriod,
|
||||
statusRefreshJitter: appResyncJitter,
|
||||
refreshRequestedApps: make(map[string]CompareWith),
|
||||
refreshRequestedAppsMutex: &sync.Mutex{},
|
||||
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
|
||||
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController, enableK8sEvent),
|
||||
settingsMgr: settingsMgr,
|
||||
selfHealTimeout: selfHealTimeout,
|
||||
selfHealBackOff: selfHealBackoff,
|
||||
clusterSharding: clusterSharding,
|
||||
projByNameCache: sync.Map{},
|
||||
applicationNamespaces: applicationNamespaces,
|
||||
@@ -279,7 +284,7 @@ func NewApplicationController(
|
||||
|
||||
metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
|
||||
|
||||
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels)
|
||||
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels, metricsApplicationConditions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -901,7 +906,7 @@ func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith
|
||||
key := ctrl.toAppKey(appName)
|
||||
|
||||
if compareWith != nil && after != nil {
|
||||
ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, compareWith), *after)
|
||||
ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, *compareWith), *after)
|
||||
} else {
|
||||
if compareWith != nil {
|
||||
ctrl.refreshRequestedAppsMutex.Lock()
|
||||
@@ -940,7 +945,7 @@ func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext b
|
||||
ctrl.appOperationQueue.Done(appKey)
|
||||
}()
|
||||
|
||||
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
|
||||
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
|
||||
return
|
||||
@@ -1012,8 +1017,8 @@ func (ctrl *ApplicationController) processAppComparisonTypeQueueItem() (processN
|
||||
return
|
||||
}
|
||||
|
||||
if parts := strings.Split(key.(string), "/"); len(parts) != 3 {
|
||||
log.Warnf("Unexpected key format in appComparisonTypeRefreshTypeQueue. Key should consists of namespace/name/comparisonType but got: %s", key.(string))
|
||||
if parts := strings.Split(key, "/"); len(parts) != 3 {
|
||||
log.Warnf("Unexpected key format in appComparisonTypeRefreshTypeQueue. Key should consists of namespace/name/comparisonType but got: %s", key)
|
||||
} else {
|
||||
if compareWith, err := strconv.Atoi(parts[2]); err != nil {
|
||||
log.Warnf("Unable to parse comparison type: %v", err)
|
||||
@@ -1039,7 +1044,7 @@ func (ctrl *ApplicationController) processProjectQueueItem() (processNext bool)
|
||||
processNext = false
|
||||
return
|
||||
}
|
||||
obj, exists, err := ctrl.projInformer.GetIndexer().GetByKey(key.(string))
|
||||
obj, exists, err := ctrl.projInformer.GetIndexer().GetByKey(key)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get project '%s' from informer index: %+v", key, err)
|
||||
return
|
||||
@@ -1551,7 +1556,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
ctrl.appOperationQueue.AddRateLimited(appKey)
|
||||
ctrl.appRefreshQueue.Done(appKey)
|
||||
}()
|
||||
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
|
||||
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
|
||||
return
|
||||
@@ -1689,8 +1694,9 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
app.Status.Summary = tree.GetSummary(app)
|
||||
}
|
||||
|
||||
if project.Spec.SyncWindows.Matches(app).CanSync(false) {
|
||||
syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources)
|
||||
canSync, _ := project.Spec.SyncWindows.Matches(app).CanSync(false)
|
||||
if canSync {
|
||||
syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionUpdated)
|
||||
setOpMs = opMS
|
||||
if syncErrCond != nil {
|
||||
app.Status.SetConditions(
|
||||
@@ -1913,7 +1919,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
}
|
||||
|
||||
// autoSync will initiate a sync operation for an application configured with automated sync
|
||||
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) (*appv1.ApplicationCondition, time.Duration) {
|
||||
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus, revisionUpdated bool) (*appv1.ApplicationCondition, time.Duration) {
|
||||
logCtx := getAppLog(app)
|
||||
ts := stats.NewTimingStats()
|
||||
defer func() {
|
||||
@@ -1957,11 +1963,18 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
}
|
||||
}
|
||||
|
||||
selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
|
||||
// Multi-Source Apps with selfHeal disabled should not trigger an autosync if
|
||||
// the last sync revision and the new sync revision is the same.
|
||||
if app.Spec.HasMultipleSources() && !selfHeal && reflect.DeepEqual(app.Status.Sync.Revisions, syncStatus.Revisions) {
|
||||
logCtx.Infof("Skipping auto-sync: selfHeal disabled and sync caused by object update")
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
desiredCommitSHA := syncStatus.Revision
|
||||
desiredCommitSHAsMS := syncStatus.Revisions
|
||||
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources())
|
||||
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources(), revisionUpdated)
|
||||
ts.AddCheckpoint("already_attempted_sync_ms")
|
||||
selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
|
||||
op := appv1.Operation{
|
||||
Sync: &appv1.SyncOperation{
|
||||
Revision: desiredCommitSHA,
|
||||
@@ -1972,6 +1985,9 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
InitiatedBy: appv1.OperationInitiator{Automated: true},
|
||||
Retry: appv1.RetryStrategy{Limit: 5},
|
||||
}
|
||||
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
|
||||
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
|
||||
}
|
||||
if app.Spec.SyncPolicy.Retry != nil {
|
||||
op.Retry = *app.Spec.SyncPolicy.Retry
|
||||
}
|
||||
@@ -1989,6 +2005,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
return nil, 0
|
||||
} else if alreadyAttempted && selfHeal {
|
||||
if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
|
||||
op.Sync.SelfHealAttemptsCount++
|
||||
for _, resource := range resources {
|
||||
if resource.Status != appv1.SyncStatusCodeSynced {
|
||||
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
|
||||
@@ -2015,7 +2032,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
}
|
||||
if bAllNeedPrune {
|
||||
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA)
|
||||
logCtx.Warnf(message)
|
||||
logCtx.Warn(message)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
|
||||
}
|
||||
}
|
||||
@@ -2055,17 +2072,26 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
|
||||
// alreadyAttemptedSync returns whether the most recent sync was performed against the
|
||||
// commitSHA and with the same app source config which are currently set in the app
|
||||
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool) (bool, synccommon.OperationPhase) {
|
||||
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool, revisionUpdated bool) (bool, synccommon.OperationPhase) {
|
||||
if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
|
||||
return false, ""
|
||||
}
|
||||
if hasMultipleSources {
|
||||
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
|
||||
return false, ""
|
||||
if revisionUpdated {
|
||||
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
|
||||
return false, ""
|
||||
}
|
||||
} else {
|
||||
log.WithField("application", app.Name).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
|
||||
}
|
||||
} else {
|
||||
if app.Status.OperationState.SyncResult.Revision != commitSHA {
|
||||
return false, ""
|
||||
if revisionUpdated {
|
||||
log.WithField("application", app.Name).Infof("Executing compare of syncResult.Revision and commitSha because manifest changed: %v", commitSHA)
|
||||
if app.Status.OperationState.SyncResult.Revision != commitSHA {
|
||||
return false, ""
|
||||
}
|
||||
} else {
|
||||
log.WithField("application", app.Name).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2098,10 +2124,24 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool,
|
||||
}
|
||||
|
||||
var retryAfter time.Duration
|
||||
if app.Status.OperationState.FinishedAt == nil {
|
||||
retryAfter = ctrl.selfHealTimeout
|
||||
if ctrl.selfHealBackOff == nil {
|
||||
if app.Status.OperationState.FinishedAt == nil {
|
||||
retryAfter = ctrl.selfHealTimeout
|
||||
} else {
|
||||
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
|
||||
}
|
||||
} else {
|
||||
retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
|
||||
backOff := *ctrl.selfHealBackOff
|
||||
backOff.Steps = int(app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
|
||||
var delay time.Duration
|
||||
for backOff.Steps > 0 {
|
||||
delay = backOff.Step()
|
||||
}
|
||||
if app.Status.OperationState.FinishedAt == nil {
|
||||
retryAfter = delay
|
||||
} else {
|
||||
retryAfter = delay - time.Since(app.Status.OperationState.FinishedAt.Time)
|
||||
}
|
||||
}
|
||||
return retryAfter <= 0, retryAfter
|
||||
}
|
||||
@@ -2109,7 +2149,7 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool,
|
||||
// isAppNamespaceAllowed returns whether the application is allowed in the
|
||||
// namespace it's residing in.
|
||||
func (ctrl *ApplicationController) isAppNamespaceAllowed(app *appv1.Application) bool {
|
||||
return app.Namespace == ctrl.namespace || glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false)
|
||||
return app.Namespace == ctrl.namespace || glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, glob.REGEXP)
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
|
||||
|
||||
@@ -4,16 +4,18 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
statecache "github.com/argoproj/argo-cd/v2/controller/cache"
|
||||
@@ -43,12 +45,15 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
mockrepoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient/mocks"
|
||||
"github.com/argoproj/argo-cd/v2/test"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/normalizers"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
)
|
||||
|
||||
var testEnableEventList []string = argo.DefaultEnableEventList()
|
||||
|
||||
type namespacedResource struct {
|
||||
v1alpha1.ResourceNode
|
||||
AppName string
|
||||
@@ -64,6 +69,7 @@ type fakeData struct {
|
||||
metricsCacheExpiration time.Duration
|
||||
applicationNamespaces []string
|
||||
updateRevisionForPathsResponse *apiclient.UpdateRevisionForPathsResponse
|
||||
additionalObjs []runtime.Object
|
||||
}
|
||||
|
||||
type MockKubectl struct {
|
||||
@@ -133,7 +139,9 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
|
||||
},
|
||||
Data: data.configMapData,
|
||||
}
|
||||
kubeClient := fake.NewSimpleClientset(&clust, &cm, &secret)
|
||||
runtimeObjs := []runtime.Object{&clust, &secret, &cm}
|
||||
runtimeObjs = append(runtimeObjs, data.additionalObjs...)
|
||||
kubeClient := fake.NewSimpleClientset(runtimeObjs...)
|
||||
settingsMgr := settings.NewSettingsManager(context.Background(), kubeClient, test.FakeArgoCDNamespace)
|
||||
kubectl := &MockKubectl{Kubectl: &kubetest.MockKubectlCmd{}}
|
||||
ctrl, err := NewApplicationController(
|
||||
@@ -151,10 +159,12 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
|
||||
time.Hour,
|
||||
time.Second,
|
||||
time.Minute,
|
||||
nil,
|
||||
time.Second*10,
|
||||
common.DefaultPortArgoCDMetrics,
|
||||
data.metricsCacheExpiration,
|
||||
[]string{},
|
||||
[]string{},
|
||||
0,
|
||||
true,
|
||||
nil,
|
||||
@@ -163,6 +173,7 @@ func newFakeController(data *fakeData, repoErr error) *ApplicationController {
|
||||
false,
|
||||
false,
|
||||
normalizers.IgnoreNormalizerOpts{},
|
||||
testEnableEventList,
|
||||
)
|
||||
db := &dbmocks.ArgoDB{}
|
||||
db.On("GetApplicationControllerReplicas").Return(1)
|
||||
@@ -554,7 +565,7 @@ func TestAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -563,6 +574,42 @@ func TestAutoSync(t *testing.T) {
|
||||
assert.False(t, app.Operation.Sync.Prune)
|
||||
}
|
||||
|
||||
func TestMultiSourceSelfHeal(t *testing.T) {
|
||||
// Simulate OutOfSync caused by object change in cluster
|
||||
// So our Sync Revisions and SyncStatus Revisions should deep equal
|
||||
t.Run("ClusterObjectChangeShouldNotTriggerAutoSync", func(t *testing.T) {
|
||||
app := newFakeMultiSourceApp()
|
||||
app.Spec.SyncPolicy.Automated.SelfHeal = false
|
||||
app.Status.Sync.Revisions = []string{"z", "x", "v"}
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revisions: []string{"z", "x", "v"},
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, app.Operation)
|
||||
})
|
||||
|
||||
t.Run("NewRevisionChangeShouldTriggerAutoSync", func(t *testing.T) {
|
||||
app := newFakeMultiSourceApp()
|
||||
app.Spec.SyncPolicy.Automated.SelfHeal = false
|
||||
app.Status.Sync.Revisions = []string{"a", "b", "c"}
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revisions: []string{"z", "x", "v"},
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, app.Operation)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAutoSyncNotAllowEmpty(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.SyncPolicy.Automated.Prune = true
|
||||
@@ -571,7 +618,7 @@ func TestAutoSyncNotAllowEmpty(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
|
||||
assert.NotNil(t, cond)
|
||||
}
|
||||
|
||||
@@ -584,7 +631,7 @@ func TestAutoSyncAllowEmpty(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
|
||||
assert.Nil(t, cond)
|
||||
}
|
||||
|
||||
@@ -598,7 +645,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -613,7 +660,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -629,7 +676,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -646,7 +693,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -672,7 +719,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -688,7 +735,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{
|
||||
{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync, RequiresPruning: true},
|
||||
})
|
||||
}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -724,7 +771,7 @@ func TestAutoSyncIndicateError(t *testing.T) {
|
||||
Source: *app.Spec.Source.DeepCopy(),
|
||||
},
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -767,7 +814,7 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
},
|
||||
}
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
require.NoError(t, err)
|
||||
@@ -1338,6 +1385,25 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
assert.Equal(t, CompareWithRecent, compareWith)
|
||||
})
|
||||
|
||||
t.Run("requesting refresh with delay gives correct compression level", func(t *testing.T) {
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
|
||||
// use a one-off controller so other tests don't have a manual refresh request
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}}, nil)
|
||||
|
||||
// refresh app with a non-nil delay
|
||||
// use zero-second delay to test the add later logic without waiting in the test
|
||||
delay := time.Duration(0)
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), &delay)
|
||||
|
||||
ctrl.processAppComparisonTypeQueueItem()
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithRecent, compareWith)
|
||||
})
|
||||
|
||||
t.Run("refresh application which status is not reconciled using latest commit", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
@@ -2102,7 +2168,7 @@ func TestHelmValuesObjectHasReplaceStrategy(t *testing.T) {
|
||||
app,
|
||||
appModified)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, `{"status":{"sync":{"comparedTo":{"source":{"helm":{"valuesObject":{"key":["value-modified1"]}}}}}}}`, string(patch))
|
||||
assert.JSONEq(t, `{"status":{"sync":{"comparedTo":{"source":{"helm":{"valuesObject":{"key":["value-modified1"]}}}}}}}`, string(patch))
|
||||
}
|
||||
|
||||
func TestAppStatusIsReplaced(t *testing.T) {
|
||||
@@ -2134,3 +2200,79 @@ func TestAppStatusIsReplaced(t *testing.T) {
|
||||
require.True(t, has)
|
||||
require.Nil(t, val)
|
||||
}
|
||||
|
||||
func TestAlreadyAttemptSync(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
t.Run("same manifest with sync result", func(t *testing.T) {
|
||||
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, false)
|
||||
assert.True(t, attempted)
|
||||
})
|
||||
|
||||
t.Run("different manifest with sync result", func(t *testing.T) {
|
||||
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, true)
|
||||
assert.False(t, attempted)
|
||||
})
|
||||
}
|
||||
|
||||
func assertDurationAround(t *testing.T, expected time.Duration, actual time.Duration) {
|
||||
delta := time.Second / 2
|
||||
assert.GreaterOrEqual(t, expected, actual-delta)
|
||||
assert.LessOrEqual(t, expected, actual+delta)
|
||||
}
|
||||
|
||||
func TestSelfHealExponentialBackoff(t *testing.T) {
|
||||
ctrl := newFakeController(&fakeData{}, nil)
|
||||
ctrl.selfHealBackOff = &wait.Backoff{
|
||||
Factor: 3,
|
||||
Duration: 2 * time.Second,
|
||||
Cap: 5 * time.Minute,
|
||||
}
|
||||
|
||||
app := &v1alpha1.Application{
|
||||
Status: v1alpha1.ApplicationStatus{
|
||||
OperationState: &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
attempts int64
|
||||
finishedAt *metav1.Time
|
||||
expectedDuration time.Duration
|
||||
shouldSelfHeal bool
|
||||
}{{
|
||||
attempts: 0,
|
||||
finishedAt: ptr.To(metav1.Now()),
|
||||
expectedDuration: 0,
|
||||
shouldSelfHeal: true,
|
||||
}, {
|
||||
attempts: 1,
|
||||
finishedAt: ptr.To(metav1.Now()),
|
||||
expectedDuration: 2 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
}, {
|
||||
attempts: 2,
|
||||
finishedAt: ptr.To(metav1.Now()),
|
||||
expectedDuration: 6 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
}, {
|
||||
attempts: 3,
|
||||
finishedAt: nil,
|
||||
expectedDuration: 18 * time.Second,
|
||||
shouldSelfHeal: false,
|
||||
}}
|
||||
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
|
||||
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = tc.attempts
|
||||
app.Status.OperationState.FinishedAt = tc.finishedAt
|
||||
ok, duration := ctrl.shouldSelfHeal(app)
|
||||
require.Equal(t, ok, tc.shouldSelfHeal)
|
||||
assertDurationAround(t, tc.expectedDuration, duration)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
9
controller/cache/cache.go
vendored
9
controller/cache/cache.go
vendored
@@ -197,6 +197,7 @@ type cacheSettings struct {
|
||||
clusterSettings clustercache.Settings
|
||||
appInstanceLabelKey string
|
||||
trackingMethod appv1.TrackingMethod
|
||||
installationID string
|
||||
// resourceOverrides provides a list of ignored differences to ignore watched resource updates
|
||||
resourceOverrides map[string]appv1.ResourceOverride
|
||||
|
||||
@@ -225,6 +226,10 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
installationID, err := c.settingsMgr.GetInstallationID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resourceUpdatesOverrides, err := c.settingsMgr.GetIgnoreResourceUpdatesOverrides()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -246,7 +251,7 @@ func (c *liveStateCache) loadCacheSettings() (*cacheSettings, error) {
|
||||
ResourcesFilter: resourcesFilter,
|
||||
}
|
||||
|
||||
return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
|
||||
return &cacheSettings{clusterSettings, appInstanceLabelKey, argo.GetTrackingMethod(c.settingsMgr), installationID, resourceUpdatesOverrides, ignoreResourceUpdatesEnabled}, nil
|
||||
}
|
||||
|
||||
func asResourceNode(r *clustercache.Resource) appv1.ResourceNode {
|
||||
@@ -523,7 +528,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
|
||||
|
||||
res.Health, _ = health.GetResourceHealth(un, cacheSettings.clusterSettings.ResourceHealthOverride)
|
||||
|
||||
appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod)
|
||||
appName := c.resourceTracking.GetAppName(un, cacheSettings.appInstanceLabelKey, cacheSettings.trackingMethod, cacheSettings.installationID)
|
||||
if isRoot && appName != "" {
|
||||
res.AppName = appName
|
||||
}
|
||||
|
||||
57
controller/cache/info.go
vendored
57
controller/cache/info.go
vendored
@@ -278,6 +278,32 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
|
||||
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
|
||||
}
|
||||
|
||||
func isPodInitializedConditionTrue(status *v1.PodStatus) bool {
|
||||
for _, condition := range status.Conditions {
|
||||
if condition.Type != v1.PodInitialized {
|
||||
continue
|
||||
}
|
||||
|
||||
return condition.Status == v1.ConditionTrue
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isRestartableInitContainer(initContainer *v1.Container) bool {
|
||||
if initContainer == nil {
|
||||
return false
|
||||
}
|
||||
if initContainer.RestartPolicy == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
|
||||
}
|
||||
|
||||
func isPodPhaseTerminal(phase v1.PodPhase) bool {
|
||||
return phase == v1.PodFailed || phase == v1.PodSucceeded
|
||||
}
|
||||
|
||||
func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
pod := v1.Pod{}
|
||||
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
|
||||
@@ -288,7 +314,8 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
totalContainers := len(pod.Spec.Containers)
|
||||
readyContainers := 0
|
||||
|
||||
reason := string(pod.Status.Phase)
|
||||
podPhase := pod.Status.Phase
|
||||
reason := string(podPhase)
|
||||
if pod.Status.Reason != "" {
|
||||
reason = pod.Status.Reason
|
||||
}
|
||||
@@ -306,6 +333,21 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
res.Images = append(res.Images, image)
|
||||
}
|
||||
|
||||
// If the Pod carries {type:PodScheduled, reason:SchedulingGated}, set reason to 'SchedulingGated'.
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
|
||||
reason = v1.PodReasonSchedulingGated
|
||||
}
|
||||
}
|
||||
|
||||
initContainers := make(map[string]*v1.Container)
|
||||
for i := range pod.Spec.InitContainers {
|
||||
initContainers[pod.Spec.InitContainers[i].Name] = &pod.Spec.InitContainers[i]
|
||||
if isRestartableInitContainer(&pod.Spec.InitContainers[i]) {
|
||||
totalContainers++
|
||||
}
|
||||
}
|
||||
|
||||
initializing := false
|
||||
for i := range pod.Status.InitContainerStatuses {
|
||||
container := pod.Status.InitContainerStatuses[i]
|
||||
@@ -313,6 +355,12 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
switch {
|
||||
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
|
||||
continue
|
||||
case isRestartableInitContainer(initContainers[container.Name]) &&
|
||||
container.Started != nil && *container.Started:
|
||||
if container.Ready {
|
||||
readyContainers++
|
||||
}
|
||||
continue
|
||||
case container.State.Terminated != nil:
|
||||
// initialization is failed
|
||||
if len(container.State.Terminated.Reason) == 0 {
|
||||
@@ -334,8 +382,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
}
|
||||
break
|
||||
}
|
||||
if !initializing {
|
||||
restarts = 0
|
||||
if !initializing || isPodInitializedConditionTrue(&pod.Status) {
|
||||
hasRunning := false
|
||||
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
|
||||
container := pod.Status.ContainerStatuses[i]
|
||||
@@ -370,7 +417,9 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
// and https://github.com/kubernetes/kubernetes/issues/90358#issuecomment-617859364
|
||||
if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
|
||||
reason = "Unknown"
|
||||
} else if pod.DeletionTimestamp != nil {
|
||||
// If the pod is being deleted and the pod phase is not succeeded or failed, set the reason to "Terminating".
|
||||
// See https://github.com/kubernetes/kubectl/issues/1595#issuecomment-2080001023
|
||||
} else if pod.DeletionTimestamp != nil && !isPodPhaseTerminal(podPhase) {
|
||||
reason = "Terminating"
|
||||
}
|
||||
|
||||
|
||||
546
controller/cache/info_test.go
vendored
546
controller/cache/info_test.go
vendored
@@ -285,6 +285,552 @@ func TestGetPodInfo(t *testing.T) {
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, info.NetworkingInfo)
|
||||
}
|
||||
|
||||
func TestGetPodWithInitialContainerInfo(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: "v1"
|
||||
kind: "Pod"
|
||||
metadata:
|
||||
labels:
|
||||
app: "app-with-initial-container"
|
||||
name: "app-with-initial-container-5f46976fdb-vd6rv"
|
||||
namespace: "default"
|
||||
ownerReferences:
|
||||
- apiVersion: "apps/v1"
|
||||
kind: "ReplicaSet"
|
||||
name: "app-with-initial-container-5f46976fdb"
|
||||
spec:
|
||||
containers:
|
||||
- image: "alpine:latest"
|
||||
imagePullPolicy: "Always"
|
||||
name: "app-with-initial-container"
|
||||
initContainers:
|
||||
- image: "alpine:latest"
|
||||
imagePullPolicy: "Always"
|
||||
name: "app-with-initial-container-logshipper"
|
||||
nodeName: "minikube"
|
||||
status:
|
||||
containerStatuses:
|
||||
- image: "alpine:latest"
|
||||
name: "app-with-initial-container"
|
||||
ready: true
|
||||
restartCount: 0
|
||||
started: true
|
||||
state:
|
||||
running:
|
||||
startedAt: "2024-10-08T08:44:25Z"
|
||||
initContainerStatuses:
|
||||
- image: "alpine:latest"
|
||||
name: "app-with-initial-container-logshipper"
|
||||
ready: true
|
||||
restartCount: 0
|
||||
started: false
|
||||
state:
|
||||
terminated:
|
||||
exitCode: 0
|
||||
reason: "Completed"
|
||||
phase: "Running"
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Running"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "1/1"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
func TestGetPodInfoWithSidecar(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
app: app-with-sidecar
|
||||
name: app-with-sidecar-6664cc788c-lqlrp
|
||||
namespace: default
|
||||
ownerReferences:
|
||||
- apiVersion: apps/v1
|
||||
kind: ReplicaSet
|
||||
name: app-with-sidecar-6664cc788c
|
||||
spec:
|
||||
containers:
|
||||
- image: 'docker.m.daocloud.io/library/alpine:latest'
|
||||
imagePullPolicy: Always
|
||||
name: app-with-sidecar
|
||||
initContainers:
|
||||
- image: 'docker.m.daocloud.io/library/alpine:latest'
|
||||
imagePullPolicy: Always
|
||||
name: logshipper
|
||||
restartPolicy: Always
|
||||
nodeName: minikube
|
||||
status:
|
||||
containerStatuses:
|
||||
- image: 'docker.m.daocloud.io/library/alpine:latest'
|
||||
name: app-with-sidecar
|
||||
ready: true
|
||||
restartCount: 0
|
||||
started: true
|
||||
state:
|
||||
running:
|
||||
startedAt: '2024-10-08T08:39:43Z'
|
||||
initContainerStatuses:
|
||||
- image: 'docker.m.daocloud.io/library/alpine:latest'
|
||||
name: logshipper
|
||||
ready: true
|
||||
restartCount: 0
|
||||
started: true
|
||||
state:
|
||||
running:
|
||||
startedAt: '2024-10-08T08:39:40Z'
|
||||
phase: Running
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Running"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "2/2"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
func TestGetPodInfoWithInitialContainer(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
generateName: myapp-long-exist-56b7d8794d-
|
||||
labels:
|
||||
app: myapp-long-exist
|
||||
name: myapp-long-exist-56b7d8794d-pbgrd
|
||||
namespace: linghao
|
||||
ownerReferences:
|
||||
- apiVersion: apps/v1
|
||||
kind: ReplicaSet
|
||||
name: myapp-long-exist-56b7d8794d
|
||||
spec:
|
||||
containers:
|
||||
- image: alpine:latest
|
||||
imagePullPolicy: Always
|
||||
name: myapp-long-exist
|
||||
initContainers:
|
||||
- image: alpine:latest
|
||||
imagePullPolicy: Always
|
||||
name: myapp-long-exist-logshipper
|
||||
nodeName: minikube
|
||||
status:
|
||||
containerStatuses:
|
||||
- image: alpine:latest
|
||||
name: myapp-long-exist
|
||||
ready: false
|
||||
restartCount: 0
|
||||
started: false
|
||||
state:
|
||||
waiting:
|
||||
reason: PodInitializing
|
||||
initContainerStatuses:
|
||||
- image: alpine:latest
|
||||
name: myapp-long-exist-logshipper
|
||||
ready: false
|
||||
restartCount: 0
|
||||
started: true
|
||||
state:
|
||||
running:
|
||||
startedAt: '2024-10-09T08:03:45Z'
|
||||
phase: Pending
|
||||
startTime: '2024-10-09T08:02:39Z'
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Init:0/1"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod has 2 restartable init containers, the first one running but not started.
|
||||
func TestGetPodInfoWithRestartableInitContainer(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test1
|
||||
spec:
|
||||
initContainers:
|
||||
- name: restartable-init-1
|
||||
restartPolicy: Always
|
||||
- name: restartable-init-2
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: container
|
||||
nodeName: minikube
|
||||
status:
|
||||
phase: Pending
|
||||
initContainerStatuses:
|
||||
- name: restartable-init-1
|
||||
ready: false
|
||||
restartCount: 3
|
||||
state:
|
||||
running: {}
|
||||
started: false
|
||||
lastTerminationState:
|
||||
terminated:
|
||||
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
|
||||
- name: restartable-init-2
|
||||
ready: false
|
||||
state:
|
||||
waiting: {}
|
||||
started: false
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
waiting: {}
|
||||
conditions:
|
||||
- type: ContainersReady
|
||||
status: "False"
|
||||
- type: Initialized
|
||||
status: "False"
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Init:0/2"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/3"},
|
||||
{Name: "Restart Count", Value: "3"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod has 2 restartable init containers, the first one started and the second one running but not started.
|
||||
func TestGetPodInfoWithPartiallyStartedInitContainers(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test1
|
||||
spec:
|
||||
initContainers:
|
||||
- name: restartable-init-1
|
||||
restartPolicy: Always
|
||||
- name: restartable-init-2
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: container
|
||||
nodeName: minikube
|
||||
status:
|
||||
phase: Pending
|
||||
initContainerStatuses:
|
||||
- name: restartable-init-1
|
||||
ready: false
|
||||
restartCount: 3
|
||||
state:
|
||||
running: {}
|
||||
started: true
|
||||
lastTerminationState:
|
||||
terminated:
|
||||
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
|
||||
- name: restartable-init-2
|
||||
ready: false
|
||||
state:
|
||||
running: {}
|
||||
started: false
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
waiting: {}
|
||||
conditions:
|
||||
- type: ContainersReady
|
||||
status: "False"
|
||||
- type: Initialized
|
||||
status: "False"
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Init:1/2"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/3"},
|
||||
{Name: "Restart Count", Value: "3"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod has 2 restartable init containers started and 1 container running
|
||||
func TestGetPodInfoWithStartedInitContainers(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test2
|
||||
spec:
|
||||
initContainers:
|
||||
- name: restartable-init-1
|
||||
restartPolicy: Always
|
||||
- name: restartable-init-2
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: container
|
||||
nodeName: minikube
|
||||
status:
|
||||
phase: Running
|
||||
initContainerStatuses:
|
||||
- name: restartable-init-1
|
||||
ready: false
|
||||
restartCount: 3
|
||||
state:
|
||||
running: {}
|
||||
started: true
|
||||
lastTerminationState:
|
||||
terminated:
|
||||
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
|
||||
- name: restartable-init-2
|
||||
ready: false
|
||||
state:
|
||||
running: {}
|
||||
started: true
|
||||
containerStatuses:
|
||||
- ready: true
|
||||
restartCount: 4
|
||||
state:
|
||||
running: {}
|
||||
lastTerminationState:
|
||||
terminated:
|
||||
finishedAt: "2023-10-01T00:00:00Z" # Replace with actual time
|
||||
conditions:
|
||||
- type: ContainersReady
|
||||
status: "False"
|
||||
- type: Initialized
|
||||
status: "True"
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Running"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "1/3"},
|
||||
{Name: "Restart Count", Value: "7"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod has 1 init container restarting and 1 container not running
|
||||
func TestGetPodInfoWithNormalInitContainer(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test7
|
||||
spec:
|
||||
initContainers:
|
||||
- name: init-container
|
||||
containers:
|
||||
- name: main-container
|
||||
nodeName: minikube
|
||||
status:
|
||||
phase: podPhase
|
||||
initContainerStatuses:
|
||||
- ready: false
|
||||
restartCount: 3
|
||||
state:
|
||||
running: {}
|
||||
lastTerminationState:
|
||||
terminated:
|
||||
finishedAt: "2023-10-01T00:00:00Z" # Replace with the actual time
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
waiting: {}
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Init:0/1"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
{Name: "Restart Count", Value: "3"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod condition succeed
|
||||
func TestPodConditionSucceeded(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test8
|
||||
spec:
|
||||
nodeName: minikube
|
||||
containers:
|
||||
- name: container
|
||||
status:
|
||||
phase: Succeeded
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
terminated:
|
||||
reason: Completed
|
||||
exitCode: 0
|
||||
`)
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Completed"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod condition failed
|
||||
func TestPodConditionFailed(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test9
|
||||
spec:
|
||||
nodeName: minikube
|
||||
containers:
|
||||
- name: container
|
||||
status:
|
||||
phase: Failed
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
terminated:
|
||||
reason: Error
|
||||
exitCode: 1
|
||||
`)
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Error"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod condition succeed with deletion
|
||||
func TestPodConditionSucceededWithDeletion(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test10
|
||||
deletionTimestamp: "2023-10-01T00:00:00Z"
|
||||
spec:
|
||||
nodeName: minikube
|
||||
containers:
|
||||
- name: container
|
||||
status:
|
||||
phase: Succeeded
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
terminated:
|
||||
reason: Completed
|
||||
exitCode: 0
|
||||
`)
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Completed"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod condition running with deletion
|
||||
func TestPodConditionRunningWithDeletion(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test11
|
||||
deletionTimestamp: "2023-10-01T00:00:00Z"
|
||||
spec:
|
||||
nodeName: minikube
|
||||
containers:
|
||||
- name: container
|
||||
status:
|
||||
phase: Running
|
||||
containerStatuses:
|
||||
- ready: false
|
||||
restartCount: 0
|
||||
state:
|
||||
running: {}
|
||||
`)
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Terminating"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test pod condition pending with deletion
|
||||
func TestPodConditionPendingWithDeletion(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test12
|
||||
deletionTimestamp: "2023-10-01T00:00:00Z"
|
||||
spec:
|
||||
nodeName: minikube
|
||||
containers:
|
||||
- name: container
|
||||
status:
|
||||
phase: Pending
|
||||
`)
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "Terminating"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
// Test PodScheduled condition with reason SchedulingGated
|
||||
func TestPodScheduledWithSchedulingGated(t *testing.T) {
|
||||
pod := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test13
|
||||
spec:
|
||||
nodeName: minikube
|
||||
containers:
|
||||
- name: container1
|
||||
- name: container2
|
||||
status:
|
||||
phase: podPhase
|
||||
conditions:
|
||||
- type: PodScheduled
|
||||
status: "False"
|
||||
reason: SchedulingGated
|
||||
`)
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Status Reason", Value: "SchedulingGated"},
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/2"},
|
||||
}, info.Info)
|
||||
}
|
||||
|
||||
func TestGetNodeInfo(t *testing.T) {
|
||||
node := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
|
||||
@@ -51,7 +51,7 @@ func (ctrl *ApplicationController) executePostDeleteHooks(app *v1alpha1.Applicat
|
||||
revisions = append(revisions, src.TargetRevision)
|
||||
}
|
||||
|
||||
targets, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false)
|
||||
targets, _, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
applister "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/git"
|
||||
"github.com/argoproj/argo-cd/v2/util/healthz"
|
||||
metricsutil "github.com/argoproj/argo-cd/v2/util/metrics"
|
||||
"github.com/argoproj/argo-cd/v2/util/profile"
|
||||
|
||||
ctrl_metrics "sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
@@ -54,7 +55,8 @@ const (
|
||||
var (
|
||||
descAppDefaultLabels = []string{"namespace", "name", "project"}
|
||||
|
||||
descAppLabels *prometheus.Desc
|
||||
descAppLabels *prometheus.Desc
|
||||
descAppConditions *prometheus.Desc
|
||||
|
||||
descAppInfo = prometheus.NewDesc(
|
||||
"argocd_app_info",
|
||||
@@ -62,6 +64,7 @@ var (
|
||||
append(descAppDefaultLabels, "autosync_enabled", "repo", "dest_server", "dest_namespace", "sync_status", "health_status", "operation"),
|
||||
nil,
|
||||
)
|
||||
|
||||
// Deprecated
|
||||
descAppCreated = prometheus.NewDesc(
|
||||
"argocd_app_created_time",
|
||||
@@ -144,14 +147,14 @@ var (
|
||||
)
|
||||
|
||||
// NewMetricsServer returns a new prometheus server which collects application metrics
|
||||
func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, healthCheck func(r *http.Request) error, appLabels []string) (*MetricsServer, error) {
|
||||
func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, healthCheck func(r *http.Request) error, appLabels []string, appConditions []string) (*MetricsServer, error) {
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(appLabels) > 0 {
|
||||
normalizedLabels := normalizeLabels("label", appLabels)
|
||||
normalizedLabels := metricsutil.NormalizeLabels("label", appLabels)
|
||||
descAppLabels = prometheus.NewDesc(
|
||||
"argocd_app_labels",
|
||||
"Argo Application labels converted to Prometheus labels",
|
||||
@@ -160,8 +163,17 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil
|
||||
)
|
||||
}
|
||||
|
||||
if len(appConditions) > 0 {
|
||||
descAppConditions = prometheus.NewDesc(
|
||||
"argocd_app_condition",
|
||||
"Report application conditions.",
|
||||
append(descAppDefaultLabels, "condition"),
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
registry := NewAppRegistry(appLister, appFilter, appLabels)
|
||||
registry := NewAppRegistry(appLister, appFilter, appLabels, appConditions)
|
||||
|
||||
mux.Handle(MetricsPath, promhttp.HandlerFor(prometheus.Gatherers{
|
||||
// contains app controller specific metrics
|
||||
@@ -203,20 +215,6 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister, appFil
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Prometheus invalid labels, more info: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels.
|
||||
var invalidPromLabelChars = regexp.MustCompile(`[^a-zA-Z0-9_]`)
|
||||
|
||||
func normalizeLabels(prefix string, appLabels []string) []string {
|
||||
results := []string{}
|
||||
for _, label := range appLabels {
|
||||
// prometheus labels don't accept dash in their name
|
||||
curr := invalidPromLabelChars.ReplaceAllString(label, "_")
|
||||
result := fmt.Sprintf("%s_%s", prefix, curr)
|
||||
results = append(results, result)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func (m *MetricsServer) RegisterClustersInfoSource(ctx context.Context, source HasClustersInfo) {
|
||||
collector := &clusterCollector{infoSource: source}
|
||||
go collector.Run(ctx)
|
||||
@@ -307,24 +305,26 @@ func (m *MetricsServer) SetExpiration(cacheExpiration time.Duration) error {
|
||||
}
|
||||
|
||||
type appCollector struct {
|
||||
store applister.ApplicationLister
|
||||
appFilter func(obj interface{}) bool
|
||||
appLabels []string
|
||||
store applister.ApplicationLister
|
||||
appFilter func(obj interface{}) bool
|
||||
appLabels []string
|
||||
appConditions []string
|
||||
}
|
||||
|
||||
// NewAppCollector returns a prometheus collector for application metrics
|
||||
func NewAppCollector(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string) prometheus.Collector {
|
||||
func NewAppCollector(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string, appConditions []string) prometheus.Collector {
|
||||
return &appCollector{
|
||||
store: appLister,
|
||||
appFilter: appFilter,
|
||||
appLabels: appLabels,
|
||||
store: appLister,
|
||||
appFilter: appFilter,
|
||||
appLabels: appLabels,
|
||||
appConditions: appConditions,
|
||||
}
|
||||
}
|
||||
|
||||
// NewAppRegistry creates a new prometheus registry that collects applications
|
||||
func NewAppRegistry(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string) *prometheus.Registry {
|
||||
func NewAppRegistry(appLister applister.ApplicationLister, appFilter func(obj interface{}) bool, appLabels []string, appConditions []string) *prometheus.Registry {
|
||||
registry := prometheus.NewRegistry()
|
||||
registry.MustRegister(NewAppCollector(appLister, appFilter, appLabels))
|
||||
registry.MustRegister(NewAppCollector(appLister, appFilter, appLabels, appConditions))
|
||||
return registry
|
||||
}
|
||||
|
||||
@@ -333,6 +333,9 @@ func (c *appCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
if len(c.appLabels) > 0 {
|
||||
ch <- descAppLabels
|
||||
}
|
||||
if len(c.appConditions) > 0 {
|
||||
ch <- descAppConditions
|
||||
}
|
||||
ch <- descAppInfo
|
||||
ch <- descAppSyncStatusCode
|
||||
ch <- descAppHealthStatus
|
||||
@@ -397,6 +400,19 @@ func (c *appCollector) collectApps(ch chan<- prometheus.Metric, app *argoappv1.A
|
||||
addGauge(descAppLabels, 1, labelValues...)
|
||||
}
|
||||
|
||||
if len(c.appConditions) > 0 {
|
||||
conditionCount := make(map[string]int)
|
||||
for _, condition := range app.Status.Conditions {
|
||||
if slices.Contains(c.appConditions, condition.Type) {
|
||||
conditionCount[condition.Type]++
|
||||
}
|
||||
}
|
||||
|
||||
for conditionType, count := range conditionCount {
|
||||
addGauge(descAppConditions, float64(count), conditionType)
|
||||
}
|
||||
}
|
||||
|
||||
// Deprecated controller metrics
|
||||
if os.Getenv(EnvVarLegacyControllerMetrics) == "true" {
|
||||
addGauge(descAppCreated, float64(app.CreationTimestamp.Unix()))
|
||||
|
||||
@@ -116,6 +116,41 @@ status:
|
||||
status: Degraded
|
||||
`
|
||||
|
||||
const fakeApp4 = `
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: my-app-4
|
||||
namespace: argocd
|
||||
labels:
|
||||
team-name: my-team
|
||||
team-bu: bu-id
|
||||
argoproj.io/cluster: test-cluster
|
||||
spec:
|
||||
destination:
|
||||
namespace: dummy-namespace
|
||||
server: https://localhost:6443
|
||||
project: important-project
|
||||
source:
|
||||
path: some/path
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps.git
|
||||
status:
|
||||
sync:
|
||||
status: OutOfSync
|
||||
health:
|
||||
status: Degraded
|
||||
conditions:
|
||||
- lastTransitionTime: "2024-08-07T12:25:40Z"
|
||||
message: Application has 1 orphaned resources
|
||||
type: OrphanedResourceWarning
|
||||
- lastTransitionTime: "2024-08-07T12:25:40Z"
|
||||
message: Resource Pod standalone-pod is excluded in the settings
|
||||
type: ExcludedResourceWarning
|
||||
- lastTransitionTime: "2024-08-07T12:25:40Z"
|
||||
message: Resource Endpoint raw-endpoint is excluded in the settings
|
||||
type: ExcludedResourceWarning
|
||||
`
|
||||
|
||||
const fakeDefaultApp = `
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
@@ -179,7 +214,7 @@ func newFakeLister(fakeAppYAMLs ...string) (context.CancelFunc, applister.Applic
|
||||
|
||||
func testApp(t *testing.T, fakeAppYAMLs []string, expectedResponse string) {
|
||||
t.Helper()
|
||||
testMetricServer(t, fakeAppYAMLs, expectedResponse, []string{})
|
||||
testMetricServer(t, fakeAppYAMLs, expectedResponse, []string{}, []string{})
|
||||
}
|
||||
|
||||
type fakeClusterInfo struct {
|
||||
@@ -194,15 +229,17 @@ type TestMetricServerConfig struct {
|
||||
FakeAppYAMLs []string
|
||||
ExpectedResponse string
|
||||
AppLabels []string
|
||||
AppConditions []string
|
||||
ClustersInfo []gitopsCache.ClusterInfo
|
||||
}
|
||||
|
||||
func testMetricServer(t *testing.T, fakeAppYAMLs []string, expectedResponse string, appLabels []string) {
|
||||
func testMetricServer(t *testing.T, fakeAppYAMLs []string, expectedResponse string, appLabels []string, appConditions []string) {
|
||||
t.Helper()
|
||||
cfg := TestMetricServerConfig{
|
||||
FakeAppYAMLs: fakeAppYAMLs,
|
||||
ExpectedResponse: expectedResponse,
|
||||
AppLabels: appLabels,
|
||||
AppConditions: appConditions,
|
||||
ClustersInfo: []gitopsCache.ClusterInfo{},
|
||||
}
|
||||
runTest(t, cfg)
|
||||
@@ -212,7 +249,7 @@ func runTest(t *testing.T, cfg TestMetricServerConfig) {
|
||||
t.Helper()
|
||||
cancel, appLister := newFakeLister(cfg.FakeAppYAMLs...)
|
||||
defer cancel()
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, cfg.AppLabels)
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, cfg.AppLabels, cfg.AppConditions)
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(cfg.ClustersInfo) > 0 {
|
||||
@@ -303,7 +340,61 @@ argocd_app_labels{label_non_existing="",name="my-app-3",namespace="argocd",proje
|
||||
for _, c := range cases {
|
||||
c := c
|
||||
t.Run(c.description, func(t *testing.T) {
|
||||
testMetricServer(t, c.applications, c.responseContains, c.metricLabels)
|
||||
testMetricServer(t, c.applications, c.responseContains, c.metricLabels, []string{})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricConditions(t *testing.T) {
|
||||
type testCases struct {
|
||||
testCombination
|
||||
description string
|
||||
metricConditions []string
|
||||
}
|
||||
cases := []testCases{
|
||||
{
|
||||
description: "metric will only output OrphanedResourceWarning",
|
||||
metricConditions: []string{"OrphanedResourceWarning"},
|
||||
testCombination: testCombination{
|
||||
applications: []string{fakeApp4},
|
||||
responseContains: `
|
||||
# HELP argocd_app_condition Report application conditions.
|
||||
# TYPE argocd_app_condition gauge
|
||||
argocd_app_condition{condition="OrphanedResourceWarning",name="my-app-4",namespace="argocd",project="important-project"} 1
|
||||
`,
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metric will only output ExcludedResourceWarning",
|
||||
metricConditions: []string{"ExcludedResourceWarning"},
|
||||
testCombination: testCombination{
|
||||
applications: []string{fakeApp4},
|
||||
responseContains: `
|
||||
# HELP argocd_app_condition Report application conditions.
|
||||
# TYPE argocd_app_condition gauge
|
||||
argocd_app_condition{condition="ExcludedResourceWarning",name="my-app-4",namespace="argocd",project="important-project"} 2
|
||||
`,
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "metric will only output both OrphanedResourceWarning and ExcludedResourceWarning",
|
||||
metricConditions: []string{"ExcludedResourceWarning", "OrphanedResourceWarning"},
|
||||
testCombination: testCombination{
|
||||
applications: []string{fakeApp4},
|
||||
responseContains: `
|
||||
# HELP argocd_app_condition Report application conditions.
|
||||
# TYPE argocd_app_condition gauge
|
||||
argocd_app_condition{condition="OrphanedResourceWarning",name="my-app-4",namespace="argocd",project="important-project"} 1
|
||||
argocd_app_condition{condition="ExcludedResourceWarning",name="my-app-4",namespace="argocd",project="important-project"} 2
|
||||
`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
c := c
|
||||
t.Run(c.description, func(t *testing.T) {
|
||||
testMetricServer(t, c.applications, c.responseContains, []string{}, c.metricConditions)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -335,7 +426,7 @@ argocd_app_sync_status{name="my-app",namespace="argocd",project="important-proje
|
||||
func TestMetricsSyncCounter(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
appSyncTotal := `
|
||||
@@ -387,7 +478,7 @@ func assertMetricsNotPrinted(t *testing.T, expectedLines, body string) {
|
||||
func TestReconcileMetrics(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
appReconcileMetrics := `
|
||||
@@ -420,7 +511,7 @@ argocd_app_reconcile_count{dest_server="https://localhost:6443",namespace="argoc
|
||||
func TestMetricsReset(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
appSyncTotal := `
|
||||
@@ -457,23 +548,23 @@ argocd_app_sync_total{dest_server="https://localhost:6443",name="my-app",namespa
|
||||
func TestWorkqueueMetrics(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedMetrics := `
|
||||
# TYPE workqueue_adds_total counter
|
||||
workqueue_adds_total{name="test"}
|
||||
workqueue_adds_total{controller="test",name="test"}
|
||||
|
||||
# TYPE workqueue_depth gauge
|
||||
workqueue_depth{name="test"}
|
||||
workqueue_depth{controller="test",name="test"}
|
||||
|
||||
# TYPE workqueue_longest_running_processor_seconds gauge
|
||||
workqueue_longest_running_processor_seconds{name="test"}
|
||||
workqueue_longest_running_processor_seconds{controller="test",name="test"}
|
||||
|
||||
# TYPE workqueue_queue_duration_seconds histogram
|
||||
|
||||
# TYPE workqueue_unfinished_work_seconds gauge
|
||||
workqueue_unfinished_work_seconds{name="test"}
|
||||
workqueue_unfinished_work_seconds{controller="test",name="test"}
|
||||
|
||||
# TYPE workqueue_work_duration_seconds histogram
|
||||
`
|
||||
@@ -492,7 +583,7 @@ workqueue_unfinished_work_seconds{name="test"}
|
||||
func TestGoMetrics(t *testing.T) {
|
||||
cancel, appLister := newFakeLister()
|
||||
defer cancel()
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{})
|
||||
metricsServ, err := NewMetricsServer("localhost:8082", appLister, appFilter, noOpHealthCheck, []string{}, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedMetrics := `
|
||||
|
||||
@@ -70,7 +70,7 @@ type managedResource struct {
|
||||
type AppStateManager interface {
|
||||
CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error)
|
||||
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
|
||||
GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error)
|
||||
GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
|
||||
}
|
||||
|
||||
// comparisonResult holds the state of an application after the reconciliation
|
||||
@@ -88,6 +88,7 @@ type comparisonResult struct {
|
||||
timings map[string]time.Duration
|
||||
diffResultList *diff.DiffResultList
|
||||
hasPostDeleteHooks bool
|
||||
revisionUpdated bool
|
||||
}
|
||||
|
||||
func (res *comparisonResult) GetSyncStatus() *v1alpha1.SyncStatus {
|
||||
@@ -123,51 +124,56 @@ type appStateManager struct {
|
||||
// task to the repo-server. It returns the list of generated manifests as unstructured
|
||||
// objects. It also returns the full response from all calls to the repo server as the
|
||||
// second argument.
|
||||
func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) {
|
||||
func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, rollback bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error) {
|
||||
ts := stats.NewTimingStats()
|
||||
helmRepos, err := m.db.ListHelmRepositories(context.Background())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to list Helm repositories: %w", err)
|
||||
return nil, nil, false, fmt.Errorf("failed to list Helm repositories: %w", err)
|
||||
}
|
||||
permittedHelmRepos, err := argo.GetPermittedRepos(proj, helmRepos)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get permitted Helm repositories for project %q: %w", proj.Name, err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get permitted Helm repositories for project %q: %w", proj.Name, err)
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("repo_ms")
|
||||
helmRepositoryCredentials, err := m.db.GetAllHelmRepositoryCredentials(context.Background())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get Helm credentials: %w", err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get Helm credentials: %w", err)
|
||||
}
|
||||
permittedHelmCredentials, err := argo.GetPermittedReposCredentials(proj, helmRepositoryCredentials)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get permitted Helm credentials for project %q: %w", proj.Name, err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get permitted Helm credentials for project %q: %w", proj.Name, err)
|
||||
}
|
||||
|
||||
enabledSourceTypes, err := m.settingsMgr.GetEnabledSourceTypes()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get enabled source types: %w", err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get enabled source types: %w", err)
|
||||
}
|
||||
ts.AddCheckpoint("plugins_ms")
|
||||
|
||||
kustomizeSettings, err := m.settingsMgr.GetKustomizeSettings()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get Kustomize settings: %w", err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get Kustomize settings: %w", err)
|
||||
}
|
||||
|
||||
helmOptions, err := m.settingsMgr.GetHelmSettings()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get Helm settings: %w", err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get Helm settings: %w", err)
|
||||
}
|
||||
|
||||
installationID, err := m.settingsMgr.GetInstallationID()
|
||||
if err != nil {
|
||||
return nil, nil, false, fmt.Errorf("failed to get installation ID: %w", err)
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("build_options_ms")
|
||||
serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err)
|
||||
}
|
||||
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to connect to repo server: %w", err)
|
||||
return nil, nil, false, fmt.Errorf("failed to connect to repo server: %w", err)
|
||||
}
|
||||
defer io.Close(conn)
|
||||
|
||||
@@ -179,21 +185,26 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
// revisions for the rollback
|
||||
refSources, err := argo.GetRefSources(context.Background(), sources, app.Spec.Project, m.db.GetRepository, revisions, rollback)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get ref sources: %w", err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get ref sources: %w", err)
|
||||
}
|
||||
|
||||
revisionUpdated := false
|
||||
|
||||
atLeastOneRevisionIsNotPossibleToBeUpdated := false
|
||||
|
||||
keyManifestGenerateAnnotationVal, keyManifestGenerateAnnotationExists := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
|
||||
|
||||
for i, source := range sources {
|
||||
if len(revisions) < len(sources) || revisions[i] == "" {
|
||||
revisions[i] = source.TargetRevision
|
||||
}
|
||||
ts.AddCheckpoint("helm_ms")
|
||||
repo, err := m.db.GetRepository(context.Background(), source.RepoURL, proj.Name)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err)
|
||||
}
|
||||
kustomizeOptions, err := kustomizeSettings.GetOptions(source)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err)
|
||||
return nil, nil, false, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err)
|
||||
}
|
||||
|
||||
syncedRevision := app.Status.Sync.Revision
|
||||
@@ -205,13 +216,15 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
}
|
||||
}
|
||||
|
||||
val, ok := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]
|
||||
if !source.IsHelm() && syncedRevision != "" && ok && val != "" {
|
||||
revision := revisions[i]
|
||||
|
||||
if !source.IsHelm() && syncedRevision != "" && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" {
|
||||
// Validate the manifest-generate-path annotation to avoid generating manifests if it has not changed.
|
||||
_, err = repoClient.UpdateRevisionForPaths(context.Background(), &apiclient.UpdateRevisionForPathsRequest{
|
||||
updateRevisionResult, err := repoClient.UpdateRevisionForPaths(context.Background(), &apiclient.UpdateRevisionForPathsRequest{
|
||||
Repo: repo,
|
||||
Revision: revisions[i],
|
||||
Revision: revision,
|
||||
SyncedRevision: syncedRevision,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
Paths: path.GetAppRefreshPaths(app),
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
@@ -222,18 +235,29 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
RefSources: refSources,
|
||||
HasMultipleSources: app.Spec.HasMultipleSources(),
|
||||
InstallationID: installationID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
|
||||
return nil, nil, false, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
|
||||
}
|
||||
if updateRevisionResult.Changes {
|
||||
revisionUpdated = true
|
||||
}
|
||||
|
||||
// Generate manifests should use same revision as updateRevisionForPaths, because HEAD revision may be different between these two calls
|
||||
if updateRevisionResult.Revision != "" {
|
||||
revision = updateRevisionResult.Revision
|
||||
}
|
||||
} else {
|
||||
// revisionUpdated is set to true if at least one revision is not possible to be updated,
|
||||
atLeastOneRevisionIsNotPossibleToBeUpdated = true
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("version_ms")
|
||||
log.Debugf("Generating Manifest for source %s revision %s", source, revisions[i])
|
||||
log.Debugf("Generating Manifest for source %s revision %s", source, revision)
|
||||
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
|
||||
Repo: repo,
|
||||
Repos: permittedHelmRepos,
|
||||
Revision: revisions[i],
|
||||
Revision: revision,
|
||||
NoCache: noCache,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
AppLabelKey: appLabelKey,
|
||||
@@ -252,27 +276,34 @@ func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alp
|
||||
RefSources: refSources,
|
||||
ProjectName: proj.Name,
|
||||
ProjectSourceRepos: proj.Spec.SourceRepos,
|
||||
InstallationID: installationID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
|
||||
return nil, nil, false, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
|
||||
}
|
||||
|
||||
targetObj, err := unmarshalManifests(manifestInfo.Manifests)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to unmarshal manifests for source %d of %d: %w", i+1, len(sources), err)
|
||||
return nil, nil, false, fmt.Errorf("failed to unmarshal manifests for source %d of %d: %w", i+1, len(sources), err)
|
||||
}
|
||||
targetObjs = append(targetObjs, targetObj...)
|
||||
manifestInfos = append(manifestInfos, manifestInfo)
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("unmarshal_ms")
|
||||
ts.AddCheckpoint("manifests_ms")
|
||||
logCtx := log.WithField("application", app.QualifiedName())
|
||||
for k, v := range ts.Timings() {
|
||||
logCtx = logCtx.WithField(k, v.Milliseconds())
|
||||
}
|
||||
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
|
||||
logCtx.Info("GetRepoObjs stats")
|
||||
return targetObjs, manifestInfos, nil
|
||||
|
||||
// in case if annotation not exists, we should always execute selfheal if manifests changed
|
||||
if atLeastOneRevisionIsNotPossibleToBeUpdated {
|
||||
revisionUpdated = true
|
||||
}
|
||||
|
||||
return targetObjs, manifestInfos, revisionUpdated, nil
|
||||
}
|
||||
|
||||
func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) {
|
||||
@@ -329,20 +360,24 @@ func DeduplicateTargetObjects(
|
||||
|
||||
// getComparisonSettings will return the system level settings related to the
|
||||
// diff/normalization process.
|
||||
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, error) {
|
||||
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, string, error) {
|
||||
resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, "", err
|
||||
}
|
||||
appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, "", err
|
||||
}
|
||||
resFilter, err := m.settingsMgr.GetResourcesFilter()
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
return "", nil, nil, "", err
|
||||
}
|
||||
return appLabelKey, resourceOverrides, resFilter, nil
|
||||
installationID, err := m.settingsMgr.GetInstallationID()
|
||||
if err != nil {
|
||||
return "", nil, nil, "", err
|
||||
}
|
||||
return appLabelKey, resourceOverrides, resFilter, installationID, nil
|
||||
}
|
||||
|
||||
// verifyGnuPGSignature verifies the result of a GnuPG operation for a given git
|
||||
@@ -393,7 +428,7 @@ func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application
|
||||
// revision and overrides in the app spec.
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool, rollback bool) (*comparisonResult, error) {
|
||||
ts := stats.NewTimingStats()
|
||||
appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
|
||||
appLabelKey, resourceOverrides, resFilter, installationID, err := m.getComparisonSettings()
|
||||
|
||||
ts.AddCheckpoint("settings_ms")
|
||||
|
||||
@@ -422,7 +457,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
|
||||
// When signature keys are defined in the project spec, we need to verify the signature on the Git revision
|
||||
verifySignature := false
|
||||
if project.Spec.SignatureKeys != nil && len(project.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled() {
|
||||
if len(project.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled() {
|
||||
verifySignature = true
|
||||
}
|
||||
|
||||
@@ -439,6 +474,8 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
var manifestInfos []*apiclient.ManifestResponse
|
||||
targetNsExists := false
|
||||
|
||||
var revisionUpdated bool
|
||||
|
||||
if len(localManifests) == 0 {
|
||||
// If the length of revisions is not same as the length of sources,
|
||||
// we take the revisions from the sources directly for all the sources.
|
||||
@@ -449,7 +486,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
}
|
||||
}
|
||||
|
||||
targetObjs, manifestInfos, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, rollback)
|
||||
targetObjs, manifestInfos, revisionUpdated, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, rollback)
|
||||
if err != nil {
|
||||
targetObjs = make([]*unstructured.Unstructured, 0)
|
||||
msg := fmt.Sprintf("Failed to load target state: %s", err.Error())
|
||||
@@ -559,7 +596,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
|
||||
for _, liveObj := range liveObjByKey {
|
||||
if liveObj != nil {
|
||||
appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod)
|
||||
appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod, installationID)
|
||||
if appInstanceName != "" && appInstanceName != app.InstanceName(m.namespace) {
|
||||
fqInstanceName := strings.ReplaceAll(appInstanceName, "_", "/")
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{
|
||||
@@ -591,7 +628,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
}
|
||||
|
||||
// No need to care about the return value here, we just want the modified managedNs
|
||||
_, err = syncNamespace(m.resourceTracking, appLabelKey, trackingMethod, app.Name, app.Spec.SyncPolicy)(managedNs, liveObj)
|
||||
_, err = syncNamespace(app.Spec.SyncPolicy)(managedNs, liveObj)
|
||||
if err != nil {
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
|
||||
failedToLoadObjs = true
|
||||
@@ -698,7 +735,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
}
|
||||
gvk := obj.GroupVersionKind()
|
||||
|
||||
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod)
|
||||
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod, installationID)
|
||||
|
||||
resState := v1alpha1.ResourceStatus{
|
||||
Namespace: obj.GetNamespace(),
|
||||
@@ -840,6 +877,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1
|
||||
diffConfig: diffConfig,
|
||||
diffResultList: diffResults,
|
||||
hasPostDeleteHooks: hasPostDeleteHooks,
|
||||
revisionUpdated: revisionUpdated,
|
||||
}
|
||||
|
||||
if hasMultipleSources {
|
||||
@@ -897,9 +935,7 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
|
||||
return false
|
||||
}
|
||||
|
||||
currentSpec := app.BuildComparedToStatus()
|
||||
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, currentSpec)
|
||||
if specChanged {
|
||||
if !specEqualsCompareTo(app.Spec, app.Status.Sync.ComparedTo) {
|
||||
log.WithField("useDiffCache", "false").Debug("specChanged")
|
||||
return false
|
||||
}
|
||||
@@ -908,6 +944,29 @@ func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sou
|
||||
return true
|
||||
}
|
||||
|
||||
// specEqualsCompareTo compares the application spec to the comparedTo status. It normalizes the destination to match
|
||||
// the comparedTo destination before comparing. It does not mutate the original spec or comparedTo.
|
||||
func specEqualsCompareTo(spec v1alpha1.ApplicationSpec, comparedTo v1alpha1.ComparedTo) bool {
|
||||
// Make a copy to be sure we don't mutate the original.
|
||||
specCopy := spec.DeepCopy()
|
||||
currentSpec := specCopy.BuildComparedToStatus()
|
||||
|
||||
// The spec might have been augmented to include both server and name, so change it to match the comparedTo before
|
||||
// comparing.
|
||||
if comparedTo.Destination.Server == "" {
|
||||
currentSpec.Destination.Server = ""
|
||||
}
|
||||
if comparedTo.Destination.Name == "" {
|
||||
currentSpec.Destination.Name = ""
|
||||
}
|
||||
|
||||
// Set IsServerInferred to false on both, because that field is not important for comparison.
|
||||
comparedTo.Destination.SetIsServerInferred(false)
|
||||
currentSpec.Destination.SetIsServerInferred(false)
|
||||
|
||||
return reflect.DeepEqual(comparedTo, currentSpec)
|
||||
}
|
||||
|
||||
func (m *appStateManager) persistRevisionHistory(
|
||||
app *v1alpha1.Application,
|
||||
revision string,
|
||||
@@ -1002,7 +1061,7 @@ func NewAppStateManager(
|
||||
// group and kind) match the properties of the live object, or if the tracking method
|
||||
// used does not provide the required properties for matching.
|
||||
// Reference: https://github.com/argoproj/argo-cd/issues/8683
|
||||
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
|
||||
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod, installationID string) bool {
|
||||
if live == nil {
|
||||
return true
|
||||
}
|
||||
@@ -1035,7 +1094,7 @@ func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstruc
|
||||
// to match the properties from the live object. Cluster scoped objects
|
||||
// carry the app's destination namespace in the tracking annotation,
|
||||
// but are unique in GVK + name combination.
|
||||
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod)
|
||||
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod, installationID)
|
||||
if appInstance != nil {
|
||||
return isSelfReferencedObj(live, *appInstance)
|
||||
}
|
||||
|
||||
@@ -1372,8 +1372,8 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
configObj := managedObj.DeepCopy()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
})
|
||||
t.Run("will return true if tracked with label", func(t *testing.T) {
|
||||
// given
|
||||
@@ -1381,43 +1381,43 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
configObj := managedObjWithLabel.DeepCopy()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong resource name and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong resource group and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong kind and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong namespace and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel))
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel, ""))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel, ""))
|
||||
})
|
||||
t.Run("will return true if live is nil", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
})
|
||||
|
||||
t.Run("will handle upgrade in desired state APIGroup", func(t *testing.T) {
|
||||
@@ -1427,11 +1427,13 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
delete(config.GetAnnotations(), common.AnnotationKeyAppInstance)
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation, ""))
|
||||
})
|
||||
}
|
||||
|
||||
func TestUseDiffCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type fixture struct {
|
||||
testName string
|
||||
noCache bool
|
||||
@@ -1527,6 +1529,10 @@ func TestUseDiffCache(t *testing.T) {
|
||||
t.Fatalf("error merging app: %s", err)
|
||||
}
|
||||
}
|
||||
if app.Spec.Destination.Name != "" && app.Spec.Destination.Server != "" {
|
||||
// Simulate the controller's process for populating both of these fields.
|
||||
app.Spec.Destination.SetInferredServer(app.Spec.Destination.Server)
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
@@ -1692,6 +1698,44 @@ func TestUseDiffCache(t *testing.T) {
|
||||
expectedUseCache: false,
|
||||
serverSideDiff: false,
|
||||
},
|
||||
{
|
||||
// There are code paths that modify the ApplicationSpec and augment the destination field with both the
|
||||
// destination server and name. Since both fields are populated in the app spec but not in the comparedTo,
|
||||
// we need to make sure we correctly compare the fields and don't miss the cache.
|
||||
testName: "will return true if the app spec destination contains both server and name, but otherwise matches comparedTo",
|
||||
noCache: false,
|
||||
manifestInfos: manifestInfos("rev1"),
|
||||
sources: sources(),
|
||||
app: app("httpbin", "rev1", false, &argoappv1.Application{
|
||||
Spec: argoappv1.ApplicationSpec{
|
||||
Destination: argoappv1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Name: "httpbin",
|
||||
Namespace: "httpbin",
|
||||
},
|
||||
},
|
||||
Status: argoappv1.ApplicationStatus{
|
||||
Resources: []argoappv1.ResourceStatus{},
|
||||
Sync: argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Destination: argoappv1.ApplicationDestination{
|
||||
Server: "https://kubernetes.default.svc",
|
||||
Namespace: "httpbin",
|
||||
},
|
||||
},
|
||||
Revision: "rev1",
|
||||
},
|
||||
ReconciledAt: &metav1.Time{
|
||||
Time: time.Now().Add(-time.Hour),
|
||||
},
|
||||
},
|
||||
}),
|
||||
manifestRevisions: []string{"rev1"},
|
||||
statusRefreshTimeout: time.Hour * 24,
|
||||
expectedUseCache: true,
|
||||
serverSideDiff: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
@@ -1710,3 +1754,49 @@ func TestUseDiffCache(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareAppStateDefaultRevisionUpdated(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.True(t, compRes.revisionUpdated)
|
||||
}
|
||||
|
||||
func TestCompareAppStateRevisionUpdatedWithHelmSource(t *testing.T) {
|
||||
app := newFakeMultiSourceApp()
|
||||
data := fakeData{
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes, err := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false, false)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.True(t, compRes.revisionUpdated)
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
@@ -23,6 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/managedfields"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/kubectl/pkg/util/openapi"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/controller/metrics"
|
||||
@@ -30,6 +32,7 @@ import (
|
||||
listersv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
"github.com/argoproj/argo-cd/v2/util/glob"
|
||||
logutils "github.com/argoproj/argo-cd/v2/util/log"
|
||||
"github.com/argoproj/argo-cd/v2/util/lua"
|
||||
"github.com/argoproj/argo-cd/v2/util/rand"
|
||||
@@ -41,6 +44,10 @@ const (
|
||||
// EnvVarSyncWaveDelay is an environment variable which controls the delay in seconds between
|
||||
// each sync-wave
|
||||
EnvVarSyncWaveDelay = "ARGOCD_SYNC_WAVE_DELAY"
|
||||
|
||||
// serviceAccountDisallowedCharSet contains the characters that are not allowed to be present
|
||||
// in a DefaultServiceAccount configured for a DestinationServiceAccount
|
||||
serviceAccountDisallowedCharSet = "!*[]{}\\/"
|
||||
)
|
||||
|
||||
func (m *appStateManager) getOpenAPISchema(server string) (openapi.Resources, error) {
|
||||
@@ -167,12 +174,18 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
|
||||
return
|
||||
} else if syncWindowPreventsSync(app, proj) {
|
||||
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
|
||||
if state.Phase == common.OperationRunning {
|
||||
state.Message = "Sync operation blocked by sync window"
|
||||
} else {
|
||||
isBlocked, err := syncWindowPreventsSync(app, proj)
|
||||
if isBlocked {
|
||||
// If the operation is currently running, simply let the user know the sync is blocked by a current sync window
|
||||
if state.Phase == common.OperationRunning {
|
||||
state.Message = "Sync operation blocked by sync window"
|
||||
if err != nil {
|
||||
state.Message = fmt.Sprintf("%s: %v", state.Message, err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if !isMultiSourceRevision {
|
||||
@@ -282,8 +295,35 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
log.Errorf("Could not get appInstanceLabelKey: %v", err)
|
||||
return
|
||||
}
|
||||
installationID, err := m.settingsMgr.GetInstallationID()
|
||||
if err != nil {
|
||||
log.Errorf("Could not get installation ID: %v", err)
|
||||
return
|
||||
}
|
||||
trackingMethod := argo.GetTrackingMethod(m.settingsMgr)
|
||||
|
||||
impersonationEnabled, err := m.settingsMgr.IsImpersonationEnabled()
|
||||
if err != nil {
|
||||
log.Errorf("could not get impersonation feature flag: %v", err)
|
||||
return
|
||||
}
|
||||
if impersonationEnabled {
|
||||
serviceAccountToImpersonate, err := deriveServiceAccountToImpersonate(proj, app)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("failed to find a matching service account to impersonate: %v", err)
|
||||
return
|
||||
}
|
||||
logEntry = logEntry.WithFields(log.Fields{"impersonationEnabled": "true", "serviceAccount": serviceAccountToImpersonate})
|
||||
// set the impersonation headers.
|
||||
rawConfig.Impersonate = rest.ImpersonationConfig{
|
||||
UserName: serviceAccountToImpersonate,
|
||||
}
|
||||
restConfig.Impersonate = rest.ImpersonationConfig{
|
||||
UserName: serviceAccountToImpersonate,
|
||||
}
|
||||
}
|
||||
|
||||
opts := []sync.SyncOpt{
|
||||
sync.WithLogr(logutils.NewLogrusLogger(logEntry)),
|
||||
sync.WithHealthOverride(lua.ResourceHealthOverrides(resourceOverrides)),
|
||||
@@ -311,7 +351,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return (len(syncOp.Resources) == 0 ||
|
||||
isPostDeleteHook(target) ||
|
||||
argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) &&
|
||||
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod)
|
||||
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod, installationID)
|
||||
}),
|
||||
sync.WithManifestValidation(!syncOp.SyncOptions.HasOption(common.SyncOptionsDisableValidation)),
|
||||
sync.WithSyncWaveHook(delayBetweenSyncWaves),
|
||||
@@ -324,7 +364,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
}
|
||||
|
||||
if syncOp.SyncOptions.HasOption("CreateNamespace=true") {
|
||||
opts = append(opts, sync.WithNamespaceModifier(syncNamespace(m.resourceTracking, appLabelKey, trackingMethod, app.Name, app.Spec.SyncPolicy)))
|
||||
opts = append(opts, sync.WithNamespaceModifier(syncNamespace(app.Spec.SyncPolicy)))
|
||||
}
|
||||
|
||||
syncCtx, cleanup, err := sync.NewSyncContext(
|
||||
@@ -528,11 +568,52 @@ func delayBetweenSyncWaves(phase common.SyncPhase, wave int, finalWave bool) err
|
||||
return nil
|
||||
}
|
||||
|
||||
func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) bool {
|
||||
func syncWindowPreventsSync(app *v1alpha1.Application, proj *v1alpha1.AppProject) (bool, error) {
|
||||
window := proj.Spec.SyncWindows.Matches(app)
|
||||
isManual := false
|
||||
if app.Status.OperationState != nil {
|
||||
isManual = !app.Status.OperationState.Operation.InitiatedBy.Automated
|
||||
}
|
||||
return !window.CanSync(isManual)
|
||||
canSync, err := window.CanSync(isManual)
|
||||
if err != nil {
|
||||
// prevents sync because sync window has an error
|
||||
return true, err
|
||||
}
|
||||
return !canSync, nil
|
||||
}
|
||||
|
||||
// deriveServiceAccountToImpersonate determines the service account to be used for impersonation for the sync operation.
|
||||
// The returned service account will be fully qualified including namespace and the service account name in the format system:serviceaccount:<namespace>:<service_account>
|
||||
func deriveServiceAccountToImpersonate(project *v1alpha1.AppProject, application *v1alpha1.Application) (string, error) {
|
||||
// spec.Destination.Namespace is optional. If not specified, use the Application's
|
||||
// namespace
|
||||
serviceAccountNamespace := application.Spec.Destination.Namespace
|
||||
if serviceAccountNamespace == "" {
|
||||
serviceAccountNamespace = application.Namespace
|
||||
}
|
||||
// Loop through the destinationServiceAccounts and see if there is any destination that is a candidate.
|
||||
// if so, return the service account specified for that destination.
|
||||
for _, item := range project.Spec.DestinationServiceAccounts {
|
||||
dstServerMatched, err := glob.MatchWithError(item.Server, application.Spec.Destination.Server)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid glob pattern for destination server: %w", err)
|
||||
}
|
||||
dstNamespaceMatched, err := glob.MatchWithError(item.Namespace, application.Spec.Destination.Namespace)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid glob pattern for destination namespace: %w", err)
|
||||
}
|
||||
if dstServerMatched && dstNamespaceMatched {
|
||||
if strings.Trim(item.DefaultServiceAccount, " ") == "" || strings.ContainsAny(item.DefaultServiceAccount, serviceAccountDisallowedCharSet) {
|
||||
return "", fmt.Errorf("default service account contains invalid chars '%s'", item.DefaultServiceAccount)
|
||||
} else if strings.Contains(item.DefaultServiceAccount, ":") {
|
||||
// service account is specified along with its namespace.
|
||||
return fmt.Sprintf("system:serviceaccount:%s", item.DefaultServiceAccount), nil
|
||||
} else {
|
||||
// service account needs to be prefixed with a namespace
|
||||
return fmt.Sprintf("system:serviceaccount:%s:%s", serviceAccountNamespace, item.DefaultServiceAccount), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// if there is no match found in the AppProject.Spec.DestinationServiceAccounts, use the default service account of the destination namespace.
|
||||
return "", fmt.Errorf("no matching service account found for destination server %s and namespace %s", application.Spec.Destination.Server, serviceAccountNamespace)
|
||||
}
|
||||
|
||||
@@ -5,12 +5,11 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
)
|
||||
|
||||
// syncNamespace determine if Argo CD should create and/or manage the namespace
|
||||
// where the application will be deployed.
|
||||
func syncNamespace(resourceTracking argo.ResourceTracking, appLabelKey string, trackingMethod v1alpha1.TrackingMethod, appName string, syncPolicy *v1alpha1.SyncPolicy) func(m, l *unstructured.Unstructured) (bool, error) {
|
||||
func syncNamespace(syncPolicy *v1alpha1.SyncPolicy) func(m *unstructured.Unstructured, l *unstructured.Unstructured) (bool, error) {
|
||||
// This function must return true for the managed namespace to be synced.
|
||||
return func(managedNs, liveNs *unstructured.Unstructured) (bool, error) {
|
||||
if managedNs == nil {
|
||||
|
||||
@@ -8,9 +8,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
)
|
||||
|
||||
func createFakeNamespace(uid string, resourceVersion string, labels map[string]string, annotations map[string]string) *unstructured.Unstructured {
|
||||
@@ -250,7 +248,7 @@ func Test_shouldNamespaceSync(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
actual, err := syncNamespace(argo.NewResourceTracking(), common.LabelKeyAppInstance, argo.TrackingMethodAnnotation, "some-app", tt.syncPolicy)(tt.managedNs, tt.liveNs)
|
||||
actual, err := syncNamespace(tt.syncPolicy)(tt.managedNs, tt.liveNs)
|
||||
require.NoError(t, err)
|
||||
|
||||
if tt.managedNs != nil {
|
||||
|
||||
@@ -2,6 +2,7 @@ package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/sync"
|
||||
@@ -9,6 +10,7 @@ import (
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -644,6 +646,771 @@ func TestNormalizeTargetResourcesWithList(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeriveServiceAccountMatchingNamespaces(t *testing.T) {
|
||||
type fixture struct {
|
||||
project *v1alpha1.AppProject
|
||||
application *v1alpha1.Application
|
||||
}
|
||||
|
||||
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
|
||||
project := &v1alpha1.AppProject{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd-ns",
|
||||
Name: "testProj",
|
||||
},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
DestinationServiceAccounts: destinationServiceAccounts,
|
||||
},
|
||||
}
|
||||
app := &v1alpha1.Application{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: applicationNamespace,
|
||||
Name: "testApp",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "testProj",
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: destinationServerURL,
|
||||
Namespace: destinationNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
return &fixture{
|
||||
project: project,
|
||||
application: app,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("empty destination service accounts", func(t *testing.T) {
|
||||
// given an application referring a project with no destination service accounts
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := ""
|
||||
expectedErrMsg := "no matching service account found for destination server https://kubernetes.svc.local and namespace testns"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should be an error saying no valid match was found
|
||||
assert.EqualError(t, err, expectedErrMsg)
|
||||
})
|
||||
|
||||
t.Run("exact match of destination namespace", func(t *testing.T) {
|
||||
// given an application referring a project with exactly one destination service account that matches the application destination,
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should be no error and should use the right service account for impersonation
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("exact one match with multiple destination service accounts", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts having one exact match for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook-test",
|
||||
DefaultServiceAccount: "guestbook-test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should be no error and should use the right service account for impersonation
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("first match to be used when multiple matches are available", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts having multiple match for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-3",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should be no error and it should use the first matching service account for impersonation
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("first match to be used when glob pattern is used", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with glob patterns matching the application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "test*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and should use the first matching glob pattern service account for impersonation
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("no match among a valid list", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with no matches for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "test1",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "test2",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := ""
|
||||
expectedErrMsg := "no matching service account found for destination server https://kubernetes.svc.local and namespace testns"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should be an error saying no match was found
|
||||
require.EqualError(t, err, expectedErrMsg)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("app destination namespace is empty", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with empty application destination namespace
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
}
|
||||
destinationNamespace := ""
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:argocd-ns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and the service account configured for with empty namespace should be used.
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("match done via catch all glob pattern", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts having a catch all glob pattern
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns1",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and the catch all service account should be returned
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("match done via invalid glob pattern", func(t *testing.T) {
|
||||
// given an application referring a project with a destination service account having an invalid glob pattern for namespace
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "e[[a*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := ""
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there must be an error as the glob pattern is invalid.
|
||||
require.ErrorContains(t, err, "invalid glob pattern for destination namespace")
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("sa specified with a namespace", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts having a matching service account specified with its namespace
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "myns:test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:myns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should not be any error and the service account with its namespace should be returned.
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeriveServiceAccountMatchingServers(t *testing.T) {
|
||||
type fixture struct {
|
||||
project *v1alpha1.AppProject
|
||||
application *v1alpha1.Application
|
||||
}
|
||||
|
||||
setup := func(destinationServiceAccounts []v1alpha1.ApplicationDestinationServiceAccount, destinationNamespace, destinationServerURL, applicationNamespace string) *fixture {
|
||||
project := &v1alpha1.AppProject{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "argocd-ns",
|
||||
Name: "testProj",
|
||||
},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
DestinationServiceAccounts: destinationServiceAccounts,
|
||||
},
|
||||
}
|
||||
app := &v1alpha1.Application{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: applicationNamespace,
|
||||
Name: "testApp",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "testProj",
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: destinationServerURL,
|
||||
Namespace: destinationNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
return &fixture{
|
||||
project: project,
|
||||
application: app,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("exact one match with multiple destination service accounts", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts and one exact match for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://abc.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://cde.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and the right service account must be returned.
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("first match to be used when multiple matches are available", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts and multiple matches for application destination
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "guestbook",
|
||||
DefaultServiceAccount: "guestbook-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and first matching service account should be used
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("first match to be used when glob pattern is used", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with a matching glob pattern and exact match
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "test*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
|
||||
// then, there should not be any error and the service account of the glob pattern, being the first match should be returned.
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("no match among a valid list", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with no match
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://abc.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://cde.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://xyz.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := ""
|
||||
expectedErr := "no matching service account found for destination server https://xyz.svc.local and namespace testns"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there an error with appropriate message must be returned
|
||||
require.EqualError(t, err, expectedErr)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("match done via catch all glob pattern", func(t *testing.T) {
|
||||
// given an application referring a project with multiple destination service accounts with matching catch all glob pattern
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "testns1",
|
||||
DefaultServiceAccount: "test-sa-2",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "*",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://localhost:6443"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:testns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and the service account of the glob pattern match must be returned.
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("match done via invalid glob pattern", func(t *testing.T) {
|
||||
// given an application referring a project with a destination service account having an invalid glob pattern for server
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "e[[a*",
|
||||
Namespace: "test-ns",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://kubernetes.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := ""
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there must be an error as the glob pattern is invalid.
|
||||
require.ErrorContains(t, err, "invalid glob pattern for destination server")
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
|
||||
t.Run("sa specified with a namespace", func(t *testing.T) {
|
||||
// given app sync impersonation feature is enabled and matching service account is prefixed with a namespace
|
||||
t.Parallel()
|
||||
destinationServiceAccounts := []v1alpha1.ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://abc.svc.local",
|
||||
Namespace: "testns",
|
||||
DefaultServiceAccount: "myns:test-sa",
|
||||
},
|
||||
{
|
||||
Server: "https://kubernetes.svc.local",
|
||||
Namespace: "default",
|
||||
DefaultServiceAccount: "default-sa",
|
||||
},
|
||||
{
|
||||
Server: "*",
|
||||
Namespace: "*",
|
||||
DefaultServiceAccount: "test-sa",
|
||||
},
|
||||
}
|
||||
destinationNamespace := "testns"
|
||||
destinationServerURL := "https://abc.svc.local"
|
||||
applicationNamespace := "argocd-ns"
|
||||
expectedSA := "system:serviceaccount:myns:test-sa"
|
||||
|
||||
f := setup(destinationServiceAccounts, destinationNamespace, destinationServerURL, applicationNamespace)
|
||||
// when
|
||||
sa, err := deriveServiceAccountToImpersonate(f.project, f.application)
|
||||
|
||||
// then, there should not be any error and the service account with the given namespace prefix must be returned.
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedSA, sa)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSyncWithImpersonate(t *testing.T) {
|
||||
type fixture struct {
|
||||
project *v1alpha1.AppProject
|
||||
application *v1alpha1.Application
|
||||
controller *ApplicationController
|
||||
}
|
||||
|
||||
setup := func(impersonationEnabled bool, destinationNamespace, serviceAccountName string) *fixture {
|
||||
app := newFakeApp()
|
||||
app.Status.OperationState = nil
|
||||
app.Status.History = nil
|
||||
project := &v1alpha1.AppProject{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: test.FakeArgoCDNamespace,
|
||||
Name: "default",
|
||||
},
|
||||
Spec: v1alpha1.AppProjectSpec{
|
||||
DestinationServiceAccounts: []v1alpha1.
|
||||
ApplicationDestinationServiceAccount{
|
||||
{
|
||||
Server: "https://localhost:6443",
|
||||
Namespace: destinationNamespace,
|
||||
DefaultServiceAccount: serviceAccountName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
additionalObjs := []runtime.Object{}
|
||||
if serviceAccountName != "" {
|
||||
syncServiceAccount := &corev1.ServiceAccount{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: serviceAccountName,
|
||||
Namespace: test.FakeDestNamespace,
|
||||
},
|
||||
}
|
||||
additionalObjs = append(additionalObjs, syncServiceAccount)
|
||||
}
|
||||
data := fakeData{
|
||||
apps: []runtime.Object{app, project},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: "https://localhost:6443",
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{},
|
||||
configMapData: map[string]string{
|
||||
"application.sync.impersonation.enabled": strconv.FormatBool(impersonationEnabled),
|
||||
},
|
||||
additionalObjs: additionalObjs,
|
||||
}
|
||||
ctrl := newFakeController(&data, nil)
|
||||
return &fixture{
|
||||
project: project,
|
||||
application: app,
|
||||
controller: ctrl,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("sync with impersonation and no matching service account", func(t *testing.T) {
|
||||
// given app sync impersonation feature is enabled with an application referring a project no matching service account
|
||||
f := setup(true, test.FakeArgoCDNamespace, "")
|
||||
opMessage := "failed to find a matching service account to impersonate: no matching service account found for destination server https://localhost:6443 and namespace fake-dest-ns"
|
||||
|
||||
opState := &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
|
||||
// then, app sync should fail with expected error message in operation state
|
||||
assert.Equal(t, common.OperationError, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
|
||||
t.Run("sync with impersonation and empty service account match", func(t *testing.T) {
|
||||
// given app sync impersonation feature is enabled with an application referring a project matching service account that is an empty string
|
||||
f := setup(true, test.FakeDestNamespace, "")
|
||||
opMessage := "failed to find a matching service account to impersonate: default service account contains invalid chars ''"
|
||||
|
||||
opState := &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
|
||||
// then app sync should fail with expected error message in operation state
|
||||
assert.Equal(t, common.OperationError, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
|
||||
t.Run("sync with impersonation and matching sa", func(t *testing.T) {
|
||||
// given app sync impersonation feature is enabled with an application referring a project matching service account
|
||||
f := setup(true, test.FakeDestNamespace, "test-sa")
|
||||
opMessage := "successfully synced (no more tasks)"
|
||||
|
||||
opState := &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
|
||||
// then app sync should not fail
|
||||
assert.Equal(t, common.OperationSucceeded, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
|
||||
t.Run("sync without impersonation", func(t *testing.T) {
|
||||
// given app sync impersonation feature is disabled with an application referring a project matching service account
|
||||
f := setup(false, test.FakeDestNamespace, "")
|
||||
opMessage := "successfully synced (no more tasks)"
|
||||
|
||||
opState := &v1alpha1.OperationState{
|
||||
Operation: v1alpha1.Operation{
|
||||
Sync: &v1alpha1.SyncOperation{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
},
|
||||
Phase: common.OperationRunning,
|
||||
}
|
||||
// when
|
||||
f.controller.appStateManager.SyncAppState(f.application, opState)
|
||||
|
||||
// then application sync should pass using the control plane service account
|
||||
assert.Equal(t, common.OperationSucceeded, opState.Phase)
|
||||
assert.Contains(t, opState.Message, opMessage)
|
||||
})
|
||||
}
|
||||
|
||||
func dig[T any](obj interface{}, path []interface{}) T {
|
||||
i := obj
|
||||
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 117 KiB After Width: | Height: | Size: 163 KiB |
@@ -32,23 +32,41 @@ function initializeVersionDropdown() {
|
||||
window[callbackName] = function(response) {
|
||||
const div = document.createElement('div');
|
||||
div.innerHTML = response.html;
|
||||
document.querySelector(".md-header__inner > .md-header__title").appendChild(div);
|
||||
const headerTitle = document.querySelector(".md-header__inner > .md-header__title");
|
||||
if (headerTitle) {
|
||||
headerTitle.appendChild(div);
|
||||
}
|
||||
|
||||
const container = div.querySelector('.rst-versions');
|
||||
if (!container) return; // Exit if container not found
|
||||
|
||||
// Add caret icon
|
||||
var caret = document.createElement('div');
|
||||
caret.innerHTML = "<i class='fa fa-caret-down dropdown-caret'></i>";
|
||||
caret.classList.add('dropdown-caret');
|
||||
div.querySelector('.rst-current-version').appendChild(caret);
|
||||
const currentVersionElem = div.querySelector('.rst-current-version');
|
||||
if (currentVersionElem) {
|
||||
currentVersionElem.appendChild(caret);
|
||||
}
|
||||
|
||||
div.querySelector('.rst-current-version').addEventListener('click', function() {
|
||||
container.classList.toggle('shift-up');
|
||||
});
|
||||
// Add click listener to toggle dropdown
|
||||
if (currentVersionElem && container) {
|
||||
currentVersionElem.addEventListener('click', function() {
|
||||
container.classList.toggle('shift-up');
|
||||
});
|
||||
}
|
||||
|
||||
// Sorting Logic
|
||||
sortVersionLinks(container);
|
||||
};
|
||||
|
||||
// Load CSS
|
||||
var CSSLink = document.createElement('link');
|
||||
CSSLink.rel = 'stylesheet';
|
||||
CSSLink.href = '/assets/versions.css';
|
||||
document.getElementsByTagName('head')[0].appendChild(CSSLink);
|
||||
|
||||
// Load JSONP Script
|
||||
var script = document.createElement('script');
|
||||
const currentVersion = getCurrentVersion();
|
||||
script.src = 'https://argo-cd.readthedocs.io/_/api/v2/footer_html/?' +
|
||||
@@ -56,28 +74,74 @@ function initializeVersionDropdown() {
|
||||
document.getElementsByTagName('head')[0].appendChild(script);
|
||||
}
|
||||
|
||||
// Function to sort version links
|
||||
function sortVersionLinks(container) {
|
||||
// Find all <dl> elements within the container
|
||||
const dlElements = container.querySelectorAll('dl');
|
||||
|
||||
dlElements.forEach(dl => {
|
||||
const dt = dl.querySelector('dt');
|
||||
if (dt && dt.textContent.trim().toLowerCase() === 'versions') {
|
||||
// Found the Versions <dl>
|
||||
const ddElements = Array.from(dl.querySelectorAll('dd'));
|
||||
|
||||
// Define sorting criteria
|
||||
ddElements.sort((a, b) => {
|
||||
const aText = a.textContent.trim().toLowerCase();
|
||||
const bText = b.textContent.trim().toLowerCase();
|
||||
|
||||
// Prioritize 'latest' and 'stable'
|
||||
if (aText === 'latest') return -1;
|
||||
if (bText === 'latest') return 1;
|
||||
if (aText === 'stable') return -1;
|
||||
if (bText === 'stable') return 1;
|
||||
|
||||
// Extract version numbers (e.g., release-2.9)
|
||||
const aVersionMatch = aText.match(/release-(\d+(\.\d+)*)/);
|
||||
const bVersionMatch = bText.match(/release-(\d+(\.\d+)*)/);
|
||||
|
||||
if (aVersionMatch && bVersionMatch) {
|
||||
const aVersion = aVersionMatch[1].split('.').map(Number);
|
||||
const bVersion = bVersionMatch[1].split('.').map(Number);
|
||||
|
||||
for (let i = 0; i < Math.max(aVersion.length, bVersion.length); i++) {
|
||||
const aNum = aVersion[i] || 0;
|
||||
const bNum = bVersion[i] || 0;
|
||||
if (aNum > bNum) return -1;
|
||||
if (aNum < bNum) return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Fallback to alphabetical order
|
||||
return aText.localeCompare(bText);
|
||||
});
|
||||
|
||||
// Remove existing <dd> elements
|
||||
ddElements.forEach(dd => dl.removeChild(dd));
|
||||
|
||||
// Append sorted <dd> elements
|
||||
ddElements.forEach(dd => dl.appendChild(dd));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// VERSION WARNINGS
|
||||
window.addEventListener("DOMContentLoaded", function() {
|
||||
var margin = 30;
|
||||
var headerHeight = document.getElementsByClassName("md-header")[0].offsetHeight;
|
||||
const currentVersion = getCurrentVersion();
|
||||
if (currentVersion) {
|
||||
if (currentVersion && currentVersion !== "stable") {
|
||||
if (currentVersion === "latest") {
|
||||
document.querySelector("div[data-md-component=announce]").innerHTML = "<div id='announce-msg'>You are viewing the docs for an unreleased version of Argo CD, <a href='https://argo-cd.readthedocs.io/en/stable/'>click here to go to the latest stable version.</a></div>";
|
||||
var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin;
|
||||
document.querySelector("header.md-header").style.top = bannerHeight + "px";
|
||||
document.querySelector('style').textContent +=
|
||||
"@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
|
||||
document.querySelector('style').textContent +=
|
||||
"@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
|
||||
} else if (currentVersion !== "stable") {
|
||||
} else {
|
||||
document.querySelector("div[data-md-component=announce]").innerHTML = "<div id='announce-msg'>You are viewing the docs for a previous version of Argo CD, <a href='https://argo-cd.readthedocs.io/en/stable/'>click here to go to the latest stable version.</a></div>";
|
||||
var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin;
|
||||
document.querySelector("header.md-header").style.top = bannerHeight + "px";
|
||||
document.querySelector('style').textContent +=
|
||||
"@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
|
||||
document.querySelector('style').textContent +=
|
||||
"@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
|
||||
}
|
||||
var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin;
|
||||
document.querySelector("header.md-header").style.top = bannerHeight + "px";
|
||||
document.querySelector('style').textContent +=
|
||||
"@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
|
||||
document.querySelector('style').textContent +=
|
||||
"@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}";
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Site
|
||||
# Documentation Site
|
||||
|
||||
## Developing And Testing
|
||||
|
||||
The website is built using `mkdocs` and `mkdocs-material`.
|
||||
The [documentation website](https://argo-cd.readthedocs.io/) is built using `mkdocs` and `mkdocs-material`.
|
||||
|
||||
To test:
|
||||
|
||||
@@ -10,7 +10,7 @@ To test:
|
||||
make serve-docs
|
||||
```
|
||||
Once running, you can view your locally built documentation at [http://0.0.0.0:8000/](http://0.0.0.0:8000/).
|
||||
Make a change to documentation and the website will rebuild and refresh the view.
|
||||
Making changes to documentation will automatically rebuild and refresh the view.
|
||||
|
||||
Before submitting a PR build the website, to verify that there are no errors building the site
|
||||
```bash
|
||||
@@ -60,7 +60,38 @@ data:
|
||||
server: https://some-cluster
|
||||
```
|
||||
|
||||
Note: There is no need to restart Argo CD Server after modifiying the
|
||||
Proxy extensions can also be provided individually using dedicated
|
||||
Argo CD configmap keys for better GitOps operations. The example below
|
||||
demonstrates how to configure the same hypothetical httpbin config
|
||||
above using a dedicated key:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
namespace: argocd
|
||||
data:
|
||||
extension.config.httpbin: |
|
||||
connectionTimeout: 2s
|
||||
keepAlive: 15s
|
||||
idleConnectionTimeout: 60s
|
||||
maxIdleConnections: 30
|
||||
services:
|
||||
- url: http://httpbin.org
|
||||
headers:
|
||||
- name: some-header
|
||||
value: '$some.argocd.secret.key'
|
||||
cluster:
|
||||
name: some-cluster
|
||||
server: https://some-cluster
|
||||
```
|
||||
|
||||
Attention: Extension names must be unique in the Argo CD configmap. If
|
||||
duplicated keys are found, the Argo CD API server will log an error
|
||||
message and no proxy extension will be registered.
|
||||
|
||||
Note: There is no need to restart Argo CD Server after modifying the
|
||||
`extension.config` entry in Argo CD configmap. Changes will be
|
||||
automatically applied. A new proxy registry will be built making
|
||||
all new incoming extensions requests (`<argocd-host>/extensions/*`) to
|
||||
@@ -150,12 +181,11 @@ the argocd-secret with key 'some.argocd.secret.key'.
|
||||
If provided, and multiple services are configured, will have to match
|
||||
the application destination name or server to have requests properly
|
||||
forwarded to this service URL. If there are multiple backends for the
|
||||
same extension this field is required. In this case at least one of
|
||||
the two will be required: name or server. It is better to provide both
|
||||
values to avoid problems with applications unable to send requests to
|
||||
the proper backend service. If only one backend service is
|
||||
configured, this field is ignored, and all requests are forwarded to
|
||||
the configured one.
|
||||
same extension this field is required. In this case, it is necessary
|
||||
to provide both values to avoid problems with applications unable to
|
||||
send requests to the proper backend service. If only one backend
|
||||
service is configured, this field is ignored, and all requests are
|
||||
forwarded to the configured one.
|
||||
|
||||
#### `extensions.backend.services.cluster.name` (*string*)
|
||||
(optional)
|
||||
@@ -268,6 +298,10 @@ section for more details.
|
||||
|
||||
Will be populated with the username logged in Argo CD.
|
||||
|
||||
#### `Argocd-User-Groups`
|
||||
|
||||
Will be populated with the 'groups' claim from the user logged in Argo CD.
|
||||
|
||||
### Multi Backend Use-Case
|
||||
|
||||
In some cases when Argo CD is configured to sync with multiple remote
|
||||
|
||||
@@ -6,7 +6,7 @@ in the `argocd-server` Pods that are placed in the `/tmp/extensions` directory a
|
||||
```
|
||||
/tmp/extensions
|
||||
├── example1
|
||||
│ └── extension-1.js
|
||||
│ └── extension-1.js
|
||||
└── example2
|
||||
└── extension-2.js
|
||||
```
|
||||
@@ -73,7 +73,7 @@ registerSystemLevelExtension(component: ExtensionComponent, title: string, optio
|
||||
|
||||
Below is an example of a simple system level extension:
|
||||
|
||||
```typescript
|
||||
```javascript
|
||||
((window) => {
|
||||
const component = () => {
|
||||
return React.createElement(
|
||||
@@ -106,7 +106,7 @@ registerStatusPanelExtension(component: StatusPanelExtensionComponent, title: st
|
||||
|
||||
Below is an example of a simple extension:
|
||||
|
||||
```typescript
|
||||
```javascript
|
||||
((window) => {
|
||||
const component = () => {
|
||||
return React.createElement(
|
||||
@@ -129,32 +129,95 @@ It is also possible to add an optional flyout widget to your extension. It can b
|
||||
|
||||
Below is an example of an extension using the flyout widget:
|
||||
|
||||
```typescript
|
||||
|
||||
```javascript
|
||||
((window) => {
|
||||
const component = (props: {
|
||||
openFlyout: () => any
|
||||
}) => {
|
||||
openFlyout: () => any
|
||||
}) => {
|
||||
return React.createElement(
|
||||
"div",
|
||||
{
|
||||
style: { padding: "10px" },
|
||||
onClick: () => props.openFlyout()
|
||||
},
|
||||
"Hello World"
|
||||
"div",
|
||||
{
|
||||
style: { padding: "10px" },
|
||||
onClick: () => props.openFlyout()
|
||||
},
|
||||
"Hello World"
|
||||
);
|
||||
};
|
||||
const flyout = () => {
|
||||
return React.createElement(
|
||||
"div",
|
||||
{ style: { padding: "10px" } },
|
||||
"This is a flyout"
|
||||
"div",
|
||||
{ style: { padding: "10px" } },
|
||||
"This is a flyout"
|
||||
);
|
||||
};
|
||||
window.extensionsAPI.registerStatusPanelExtension(
|
||||
component,
|
||||
"My Extension",
|
||||
"my_extension",
|
||||
flyout
|
||||
component,
|
||||
"My Extension",
|
||||
"my_extension",
|
||||
flyout
|
||||
);
|
||||
})(window);
|
||||
```
|
||||
|
||||
## Top Bar Action Menu Extensions
|
||||
|
||||
The top bar panel is the action menu at the top of the application view where the action buttons are displayed, like Details, Sync, and Refresh. Argo CD allows you to add a new button to the top bar action menu of an application.
|
||||
When the extension button is clicked, the custom widget will be rendered in a flyout panel.
|
||||
|
||||
The extension should be registered using the `extensionsAPI.registerTopBarActionMenuExt` method:
|
||||
|
||||
```typescript
|
||||
registerTopBarActionMenuExt(
|
||||
component: TopBarActionMenuExtComponent,
|
||||
title: string,
|
||||
id: string,
|
||||
flyout?: ExtensionComponent,
|
||||
shouldDisplay: (app?: Application) => boolean = () => true,
|
||||
iconClassName?: string,
|
||||
isMiddle = false
|
||||
)
|
||||
```
|
||||
|
||||
The callback function `shouldDisplay` should return true if the extension should be displayed and false otherwise:
|
||||
|
||||
```typescript
|
||||
const shouldDisplay = (app: Application) => {
|
||||
    return app.metadata?.labels?.['application.environmentLabelKey'] === "prd";
|
||||
};
|
||||
```
|
||||
|
||||
Below is an example of a simple extension with a flyout widget:
|
||||
|
||||
```javascript
|
||||
((window) => {
|
||||
const shouldDisplay = () => {
|
||||
return true;
|
||||
};
|
||||
const flyout = () => {
|
||||
return React.createElement(
|
||||
"div",
|
||||
{ style: { padding: "10px" } },
|
||||
"This is a flyout"
|
||||
);
|
||||
};
|
||||
const component = () => {
|
||||
return React.createElement(
|
||||
"div",
|
||||
{
|
||||
onClick: () => flyout()
|
||||
},
|
||||
"Toolbar Extension Test"
|
||||
);
|
||||
};
|
||||
window.extensionsAPI.registerTopBarActionMenuExt(
|
||||
component,
|
||||
"Toolbar Extension Test",
|
||||
"Toolbar_Extension_Test",
|
||||
flyout,
|
||||
shouldDisplay,
|
||||
'',
|
||||
true
|
||||
);
|
||||
})(window);
|
||||
```
|
||||
@@ -1,10 +1,26 @@
|
||||
# Overview
|
||||
|
||||
!!! warning "You probably don't want to be reading this section of the docs."
|
||||
This part of the manual is aimed at people wanting to develop third-party applications that interact with Argo CD, e.g.
|
||||
This part of the manual is aimed at helping people contribute to Argo CD, the documentation, or to develop third-party applications that interact with Argo CD, e.g.
|
||||
|
||||
* A chat bot
|
||||
* A Slack integration
|
||||
|
||||
!!! note
|
||||
Please make sure you've completed the [getting started guide](../getting_started.md).
|
||||
|
||||
## Contributing to Argo CD
|
||||
* [Code Contribution Guide](code-contributions/)
|
||||
* [Contributors Quickstart](contributors-quickstart/)
|
||||
* [Running Argo CD Locally](running-locally/)
|
||||
|
||||
Need help? Start with the [Contributors FAQ](faq/)
|
||||
|
||||
## Contributing to the Documentation
|
||||
* [Building and Running Documentation Site Locally](docs-site/)
|
||||
|
||||
## Extensions and Third-Party Applications
|
||||
* [UI Extensions](ui-extensions/)
|
||||
* [Proxy Extensions](proxy-extensions/)
|
||||
* [Config Management Plugins](../operator-manual/config-management-plugins/)
|
||||
|
||||
## Contributing to Argo Website
|
||||
The Argo website is maintained in the [argo-site](https://github.com/argoproj/argo-site) repository.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user