mirror of
https://github.com/argoproj/argo-cd.git
synced 2026-02-21 18:18:48 +01:00
Compare commits
417 Commits
dependabot
...
renovate/g
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f04ca4a967 | ||
|
|
9ef837c326 | ||
|
|
c11d35a20f | ||
|
|
a7a07e2cd8 | ||
|
|
9faa6098ed | ||
|
|
0fb6c51f9d | ||
|
|
dbef22c843 | ||
|
|
47142b89f4 | ||
|
|
98a22612dd | ||
|
|
6cce4b29b9 | ||
|
|
9087ad7282 | ||
|
|
c377101491 | ||
|
|
1d13ebc372 | ||
|
|
45c8fd9d2b | ||
|
|
2bba563a76 | ||
|
|
f13aa46e7f | ||
|
|
3af3a056a2 | ||
|
|
aed63c628d | ||
|
|
22d3ef0ef6 | ||
|
|
e8875bbe7b | ||
|
|
5c10b47d27 | ||
|
|
1680134dc2 | ||
|
|
a330ae4355 | ||
|
|
cd3dc7a1cf | ||
|
|
f4541a60c0 | ||
|
|
81da5ea740 | ||
|
|
c4d99bb224 | ||
|
|
21ec075fd9 | ||
|
|
b834987db9 | ||
|
|
139debe3bb | ||
|
|
f4c4c66f38 | ||
|
|
0793efb5e4 | ||
|
|
15a35daf16 | ||
|
|
4e5b201ba5 | ||
|
|
bb56b9ea67 | ||
|
|
b18ea682c4 | ||
|
|
7fafc99a7a | ||
|
|
ba38778d8c | ||
|
|
48933252b4 | ||
|
|
adf89ea322 | ||
|
|
e492587fb1 | ||
|
|
fed3c7eef7 | ||
|
|
922e459665 | ||
|
|
0fe2a2110c | ||
|
|
1e4cfcc4a0 | ||
|
|
8d018bbf2e | ||
|
|
41f664493e | ||
|
|
939d88c5c6 | ||
|
|
0174fccf28 | ||
|
|
6212ea2afb | ||
|
|
873c2fcfc7 | ||
|
|
2229f9d6fc | ||
|
|
5a8b427322 | ||
|
|
2e5601f932 | ||
|
|
7ae14c89d9 | ||
|
|
8b8d04ecfa | ||
|
|
c64183717b | ||
|
|
d54c8afc09 | ||
|
|
762114c6df | ||
|
|
564e507dd7 | ||
|
|
e4eb86d2db | ||
|
|
bed3d56d17 | ||
|
|
f401a0ee11 | ||
|
|
bc4775468a | ||
|
|
17e5c1f68f | ||
|
|
df324c07d8 | ||
|
|
6028dea3a5 | ||
|
|
b68601255c | ||
|
|
e24d8d4024 | ||
|
|
93148b52c4 | ||
|
|
12d3f5dba1 | ||
|
|
f5a562ac30 | ||
|
|
1268dd9bff | ||
|
|
8b2799c51c | ||
|
|
993344e232 | ||
|
|
670d383f69 | ||
|
|
7829e2c6c1 | ||
|
|
ed752cb540 | ||
|
|
12b1bf5f34 | ||
|
|
52683fdd3e | ||
|
|
3247474212 | ||
|
|
c69d30e52d | ||
|
|
2cfc70afa9 | ||
|
|
728674f922 | ||
|
|
cb2b7faa6d | ||
|
|
a608753071 | ||
|
|
cc39e63e24 | ||
|
|
db7acf8501 | ||
|
|
23f3472f25 | ||
|
|
b96401bb76 | ||
|
|
f953976d92 | ||
|
|
26b970b5bd | ||
|
|
f44de4b854 | ||
|
|
634ef6ff1c | ||
|
|
4a3884f516 | ||
|
|
fd4355baae | ||
|
|
d954789d47 | ||
|
|
66f7b4caa1 | ||
|
|
c447628913 | ||
|
|
9922336968 | ||
|
|
8fa3e47d17 | ||
|
|
ae16c00916 | ||
|
|
267eb2ff0f | ||
|
|
1cec174803 | ||
|
|
05385b3dd8 | ||
|
|
c07768cd64 | ||
|
|
b88527cb39 | ||
|
|
e8f86101f5 | ||
|
|
5e5c4b7d03 | ||
|
|
c7588ffb44 | ||
|
|
6f6c39d8f4 | ||
|
|
4c9291152b | ||
|
|
f2233ccd67 | ||
|
|
871b0b434c | ||
|
|
35331553bf | ||
|
|
3e70033247 | ||
|
|
22c652cf97 | ||
|
|
2ffaf43c1d | ||
|
|
4445dbafb2 | ||
|
|
04cf408264 | ||
|
|
5b8e4b57ac | ||
|
|
88a32d6aab | ||
|
|
51fa4e8a54 | ||
|
|
56320a7b08 | ||
|
|
87faf58733 | ||
|
|
1db5f2e618 | ||
|
|
d269e6f936 | ||
|
|
e6a7c1d4e2 | ||
|
|
91b8bba570 | ||
|
|
29805b0e8f | ||
|
|
69f24f007b | ||
|
|
7168674403 | ||
|
|
24d4cb57c5 | ||
|
|
d135f73160 | ||
|
|
37b0f0f767 | ||
|
|
8c3b78ef88 | ||
|
|
68e5a4a12c | ||
|
|
d154627681 | ||
|
|
e85e353b81 | ||
|
|
c39fde74f0 | ||
|
|
08cd547750 | ||
|
|
4362e8ccb7 | ||
|
|
a06dfeb832 | ||
|
|
b20fd4342f | ||
|
|
1c5d7f1f65 | ||
|
|
49f3c05d7d | ||
|
|
9bc35de19d | ||
|
|
a4919edffb | ||
|
|
9e804f99f0 | ||
|
|
ef8d03cea5 | ||
|
|
6265da106e | ||
|
|
b1b157068e | ||
|
|
7129a2c147 | ||
|
|
6c38186f7f | ||
|
|
d3bdc9d5f3 | ||
|
|
728262ac55 | ||
|
|
928aee5dff | ||
|
|
5ce60ca6e3 | ||
|
|
88fccc91c6 | ||
|
|
7a2dc7e80f | ||
|
|
8657798324 | ||
|
|
98f2760d50 | ||
|
|
7ed0f2300e | ||
|
|
5d5d17ae35 | ||
|
|
bfe8b30d9a | ||
|
|
65a082b12c | ||
|
|
14a22ad926 | ||
|
|
70c8f4612f | ||
|
|
b1a9fab70c | ||
|
|
1e5761c1d0 | ||
|
|
8c8902b93f | ||
|
|
13c47ee244 | ||
|
|
82391027d9 | ||
|
|
0c82f4079b | ||
|
|
97af89a3b3 | ||
|
|
d737f8fe43 | ||
|
|
57cccb65c2 | ||
|
|
9bca4859e0 | ||
|
|
940a489cfa | ||
|
|
7dae82dfd3 | ||
|
|
0984b03805 | ||
|
|
b74c0a0e1a | ||
|
|
eaef25c3eb | ||
|
|
a8cae97da0 | ||
|
|
b2b6d9822b | ||
|
|
da7f11a826 | ||
|
|
2b1f5959bd | ||
|
|
5e2a8a86d0 | ||
|
|
d3de4435ce | ||
|
|
5510bdfd71 | ||
|
|
c67763b069 | ||
|
|
1d6ba890a8 | ||
|
|
2e90919fe6 | ||
|
|
34bc56352c | ||
|
|
e039293b7e | ||
|
|
2a0eac0ca9 | ||
|
|
6a2077642e | ||
|
|
24b0ecc657 | ||
|
|
d7364b4662 | ||
|
|
f78cddf736 | ||
|
|
45a7a18256 | ||
|
|
04d1ca4733 | ||
|
|
6d9b5bdf53 | ||
|
|
90123bac04 | ||
|
|
37b67fa4a5 | ||
|
|
48faed19f1 | ||
|
|
0e42012778 | ||
|
|
1b3ced9261 | ||
|
|
ad2e4450f2 | ||
|
|
90e2148667 | ||
|
|
2558e80f41 | ||
|
|
3c6449da89 | ||
|
|
e5417e1eb3 | ||
|
|
69f7d39717 | ||
|
|
390ea4ff54 | ||
|
|
7e868da310 | ||
|
|
03ac864dde | ||
|
|
27b70cf56e | ||
|
|
8ed3a24d49 | ||
|
|
9a990b7e89 | ||
|
|
ec80ebdf67 | ||
|
|
4dfab5d136 | ||
|
|
8f23c885b6 | ||
|
|
48a7030125 | ||
|
|
38ad19fd95 | ||
|
|
24c08922e5 | ||
|
|
323f993816 | ||
|
|
6ec53193fd | ||
|
|
559744a65e | ||
|
|
3f03097983 | ||
|
|
9928c906a2 | ||
|
|
99710b5183 | ||
|
|
42d4cfb857 | ||
|
|
751550562c | ||
|
|
72d054d772 | ||
|
|
f3dbc6f9de | ||
|
|
16cc1b15af | ||
|
|
aff3ae3f4d | ||
|
|
b8decb798a | ||
|
|
4393f7deb8 | ||
|
|
4024fe7c22 | ||
|
|
678f61b8d3 | ||
|
|
31e0f428e8 | ||
|
|
998253aa41 | ||
|
|
69d1d88807 | ||
|
|
58b0116d75 | ||
|
|
b9daeac44e | ||
|
|
a78a616566 | ||
|
|
d2b881ae4a | ||
|
|
796f72c3d4 | ||
|
|
7da3ecc08f | ||
|
|
fca42e3fd4 | ||
|
|
7f5072f286 | ||
|
|
fe6aaad4f0 | ||
|
|
90eae48c77 | ||
|
|
9895f55781 | ||
|
|
660295f656 | ||
|
|
be2c243ac8 | ||
|
|
8eac64d54c | ||
|
|
c68ec277d4 | ||
|
|
51d88197d7 | ||
|
|
4e63bc7563 | ||
|
|
19415979e8 | ||
|
|
5ac055d2a2 | ||
|
|
dcf1965c52 | ||
|
|
0a1572b9d9 | ||
|
|
853b8dddd3 | ||
|
|
9fffcd50d3 | ||
|
|
835c1fbd3c | ||
|
|
2ed67e8fac | ||
|
|
dd1547fcb4 | ||
|
|
635b9fe8fb | ||
|
|
195b238a37 | ||
|
|
cb61611816 | ||
|
|
a37a4d4073 | ||
|
|
2de6819422 | ||
|
|
df3a45ac02 | ||
|
|
f8aea44398 | ||
|
|
02de363d9c | ||
|
|
79943d8189 | ||
|
|
36f1a59c09 | ||
|
|
d5383de5c5 | ||
|
|
9cc960d07d | ||
|
|
fd78d66f4d | ||
|
|
3e6f11e08e | ||
|
|
e5b83f1d1b | ||
|
|
60adba2d5f | ||
|
|
be37e0aa3d | ||
|
|
ce35b4c484 | ||
|
|
01d00ac952 | ||
|
|
7f5ef5c087 | ||
|
|
1c9bb478e8 | ||
|
|
bc49329691 | ||
|
|
6747cfa28d | ||
|
|
5ee35ad707 | ||
|
|
908c73255e | ||
|
|
b090ee70a8 | ||
|
|
3eb442ed82 | ||
|
|
a5c6898655 | ||
|
|
ac4ae1779e | ||
|
|
d83ef2c224 | ||
|
|
9dfa9db097 | ||
|
|
20e3877633 | ||
|
|
8e00df5326 | ||
|
|
6b6512ae30 | ||
|
|
262c8151ae | ||
|
|
1bc9adb134 | ||
|
|
733350ce7c | ||
|
|
a74d8996b7 | ||
|
|
4e72dd7c55 | ||
|
|
2c4dd51e15 | ||
|
|
1e2a66d5b2 | ||
|
|
ffc3b1a11d | ||
|
|
54e2648b3f | ||
|
|
aa5d1395bc | ||
|
|
4e69156e18 | ||
|
|
9aff762531 | ||
|
|
0cfc2fd861 | ||
|
|
88ce38e450 | ||
|
|
5bd2d0d917 | ||
|
|
ebff248ba8 | ||
|
|
6f1e27e93c | ||
|
|
965c83e016 | ||
|
|
276d92d4e0 | ||
|
|
79f152c1ba | ||
|
|
a3eb4e722e | ||
|
|
3349949835 | ||
|
|
562194b35c | ||
|
|
56f8797a2b | ||
|
|
313e8bf70f | ||
|
|
786b24e2c4 | ||
|
|
69b1f0a33c | ||
|
|
e452870b0e | ||
|
|
e8e39a996e | ||
|
|
6ead52c21c | ||
|
|
9e25f93e03 | ||
|
|
3f44b85a77 | ||
|
|
ff019243a1 | ||
|
|
cd11e44d8b | ||
|
|
f420cce7a5 | ||
|
|
d39c0083ea | ||
|
|
4cd4e5e74e | ||
|
|
2e4af5fa5a | ||
|
|
e692a22b01 | ||
|
|
abbdfa26fd | ||
|
|
36345afeb2 | ||
|
|
bf035b3cb4 | ||
|
|
d58ba040e9 | ||
|
|
09b5cbdda2 | ||
|
|
c012702ce0 | ||
|
|
fb94cad141 | ||
|
|
c94874fd18 | ||
|
|
a90c54599b | ||
|
|
3b1ac4b22d | ||
|
|
0864f1ac95 | ||
|
|
671107cb10 | ||
|
|
5c2b13f07c | ||
|
|
5ca752429e | ||
|
|
1fbd63d095 | ||
|
|
07bd5e0f9e | ||
|
|
be042c4474 | ||
|
|
346a749cde | ||
|
|
04794332d2 | ||
|
|
39b9e4f8c5 | ||
|
|
0f822ff801 | ||
|
|
4d16fdcea4 | ||
|
|
c60a727524 | ||
|
|
6ec1aa1b84 | ||
|
|
a6a78ef8d6 | ||
|
|
99fea7c12e | ||
|
|
0c1eb30b4d | ||
|
|
ca6e205332 | ||
|
|
5107ec1ce3 | ||
|
|
3401d3bf92 | ||
|
|
0a0176f4fd | ||
|
|
d6ecc66216 | ||
|
|
203e07c9a4 | ||
|
|
29df864ae1 | ||
|
|
7d0820f5ca | ||
|
|
8d47727d38 | ||
|
|
3df2883a4d | ||
|
|
54b3c95e84 | ||
|
|
b8ac5ef635 | ||
|
|
986e1f8589 | ||
|
|
ea31d17f53 | ||
|
|
9567183b7c | ||
|
|
460111f7bc | ||
|
|
ac49c67403 | ||
|
|
e9811678fa | ||
|
|
061c1fc7c5 | ||
|
|
e37c3dbd40 | ||
|
|
a1bcd4246e | ||
|
|
4501ebb93f | ||
|
|
e0f4b00126 | ||
|
|
d518f13b2a | ||
|
|
c880373aae | ||
|
|
b0336b8f79 | ||
|
|
9fd0601e52 | ||
|
|
83d553ca51 | ||
|
|
d43fbe6148 | ||
|
|
1b48f363bb | ||
|
|
614c85cb72 | ||
|
|
414d9eb5db | ||
|
|
5c9a5ef9a6 | ||
|
|
d1113970cd | ||
|
|
58d82bedb8 | ||
|
|
4dd9bc7642 | ||
|
|
7f3709374b | ||
|
|
7922c77991 | ||
|
|
20f9081fb4 | ||
|
|
1d09c8c8a1 | ||
|
|
bee23628a8 | ||
|
|
f03ffb3592 | ||
|
|
d4ebcc0c15 | ||
|
|
a671cc9b23 | ||
|
|
49514c9b4c | ||
|
|
f7590fa302 |
8
.github/ISSUE_TEMPLATE/bug_report.md
vendored
8
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -2,7 +2,7 @@
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: 'bug'
|
||||
labels: ['bug', 'triage/pending']
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
@@ -10,9 +10,9 @@ assignees: ''
|
||||
|
||||
Checklist:
|
||||
|
||||
* [ ] I've searched in the docs and FAQ for my answer: https://bit.ly/argocd-faq.
|
||||
* [ ] I've included steps to reproduce the bug.
|
||||
* [ ] I've pasted the output of `argocd version`.
|
||||
- [ ] I've searched in the docs and FAQ for my answer: https://bit.ly/argocd-faq.
|
||||
- [ ] I've included steps to reproduce the bug.
|
||||
- [ ] I've pasted the output of `argocd version`.
|
||||
|
||||
**Describe the bug**
|
||||
|
||||
|
||||
@@ -2,9 +2,10 @@
|
||||
name: Enhancement proposal
|
||||
about: Propose an enhancement for this project
|
||||
title: ''
|
||||
labels: 'enhancement'
|
||||
labels: ['enhancement', 'triage/pending']
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
# Summary
|
||||
|
||||
What change you think needs making.
|
||||
@@ -15,4 +16,4 @@ Please give examples of your use case, e.g. when would you use this.
|
||||
|
||||
# Proposal
|
||||
|
||||
How do you think this should be implemented?
|
||||
How do you think this should be implemented?
|
||||
|
||||
14
.github/ISSUE_TEMPLATE/new_dev_tool.md
vendored
14
.github/ISSUE_TEMPLATE/new_dev_tool.md
vendored
@@ -2,17 +2,17 @@
|
||||
name: New Dev Tool Request
|
||||
about: This is a request for adding a new tool for setting up a dev environment.
|
||||
title: ''
|
||||
labels: ''
|
||||
labels: ['component:dev-env', 'triage/pending']
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
Checklist:
|
||||
|
||||
* [ ] I am willing to maintain this tool, or have another Argo CD maintainer who is.
|
||||
* [ ] I have another Argo CD maintainer who is willing to help maintain this tool (there needs to be at least two maintainers willing to maintain this tool)
|
||||
* [ ] I have a lead sponsor who is a core Argo CD maintainer
|
||||
* [ ] There is a PR which adds said tool - this is so that the maintainers can assess the impact of having this in the tree
|
||||
* [ ] I have given a motivation why this should be added
|
||||
- [ ] I am willing to maintain this tool, or have another Argo CD maintainer who is.
|
||||
- [ ] I have another Argo CD maintainer who is willing to help maintain this tool (there needs to be at least two maintainers willing to maintain this tool)
|
||||
- [ ] I have a lead sponsor who is a core Argo CD maintainer
|
||||
- [ ] There is a PR which adds said tool - this is so that the maintainers can assess the impact of having this in the tree
|
||||
- [ ] I have given a motivation why this should be added
|
||||
|
||||
### The proposer
|
||||
|
||||
@@ -24,7 +24,7 @@ Checklist:
|
||||
|
||||
### Motivation
|
||||
|
||||
<!-- Why this tool would be useful to have in the tree. -->
|
||||
<!-- Why this tool would be useful to have in the tree. -->
|
||||
|
||||
### Link to PR (Optional)
|
||||
|
||||
|
||||
8
.github/ISSUE_TEMPLATE/security_logs.md
vendored
8
.github/ISSUE_TEMPLATE/security_logs.md
vendored
@@ -1,10 +1,11 @@
|
||||
---
|
||||
name: Security log
|
||||
about: Propose adding security-related logs or tagging existing logs with security fields
|
||||
title: "seclog: [Event Description]"
|
||||
labels: security-log
|
||||
assignees: notfromstatefarm
|
||||
title: 'seclog: [Event Description]'
|
||||
labels: ['security', 'triage/pending']
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
# Event to be logged
|
||||
|
||||
Specify the event that needs to be logged or existing logs that need to be tagged.
|
||||
@@ -16,4 +17,3 @@ What security level should these events be logged under? Refer to https://argo-c
|
||||
# Common Weakness Enumeration
|
||||
|
||||
Is there an associated [CWE](https://cwe.mitre.org/) that could be tagged as well?
|
||||
|
||||
|
||||
3
.github/cherry-pick-bot.yml
vendored
3
.github/cherry-pick-bot.yml
vendored
@@ -1,3 +0,0 @@
|
||||
enabled: true
|
||||
preservePullRequestTitle: true
|
||||
|
||||
15
.github/configs/renovate-config.js
vendored
Normal file
15
.github/configs/renovate-config.js
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
module.exports = {
|
||||
platform: 'github',
|
||||
gitAuthor: 'renovate[bot] <renovate[bot]@users.noreply.github.com>',
|
||||
autodiscover: false,
|
||||
allowPostUpgradeCommandTemplating: true,
|
||||
allowedPostUpgradeCommands: ["make mockgen"],
|
||||
extends: [
|
||||
"github>argoproj/argo-cd//renovate-presets/commons.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/custom-managers/shell.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/custom-managers/yaml.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/fix/disable-all-updates.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/devtool.json5",
|
||||
"github>argoproj/argo-cd//renovate-presets/docs.json5"
|
||||
]
|
||||
}
|
||||
2
.github/pull_request_template.md
vendored
2
.github/pull_request_template.md
vendored
@@ -8,7 +8,7 @@ Checklist:
|
||||
|
||||
* [ ] Either (a) I've created an [enhancement proposal](https://github.com/argoproj/argo-cd/issues/new/choose) and discussed it with the community, (b) this is a bug fix, or (c) this does not need to be in the release notes.
|
||||
* [ ] The title of the PR states what changed and the related issues number (used for the release note).
|
||||
* [ ] The title of the PR conforms to the [Toolchain Guide](https://argo-cd.readthedocs.io/en/latest/developer-guide/toolchain-guide/#title-of-the-pr)
|
||||
* [ ] The title of the PR conforms to the [Title of the PR](https://argo-cd.readthedocs.io/en/latest/developer-guide/submit-your-pr/#title-of-the-pr)
|
||||
* [ ] I've included "Closes [ISSUE #]" or "Fixes [ISSUE #]" in the description to automatically close the associated issue.
|
||||
* [ ] I've updated both the CLI and UI to expose my feature, or I plan to submit a second PR with them.
|
||||
* [ ] Does this PR require documentation updates?
|
||||
|
||||
2
.github/workflows/bump-major-version.yaml
vendored
2
.github/workflows/bump-major-version.yaml
vendored
@@ -37,7 +37,7 @@ jobs:
|
||||
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Add ~/go/bin to PATH
|
||||
|
||||
114
.github/workflows/cherry-pick-single.yml
vendored
Normal file
114
.github/workflows/cherry-pick-single.yml
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
name: Cherry Pick Single
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
merge_commit_sha:
|
||||
required: true
|
||||
type: string
|
||||
description: "The merge commit SHA to cherry-pick"
|
||||
version_number:
|
||||
required: true
|
||||
type: string
|
||||
description: "The version number (from cherry-pick/ label)"
|
||||
pr_number:
|
||||
required: true
|
||||
type: string
|
||||
description: "The original PR number"
|
||||
pr_title:
|
||||
required: true
|
||||
type: string
|
||||
description: "The original PR title"
|
||||
secrets:
|
||||
CHERRYPICK_APP_ID:
|
||||
required: true
|
||||
CHERRYPICK_APP_PRIVATE_KEY:
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
cherry-pick:
|
||||
name: Cherry Pick to ${{ inputs.version_number }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Generate a token
|
||||
id: generate-token
|
||||
uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1
|
||||
with:
|
||||
app-id: ${{ secrets.CHERRYPICK_APP_ID }}
|
||||
private-key: ${{ secrets.CHERRYPICK_APP_PRIVATE_KEY }}
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ steps.generate-token.outputs.token }}
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config --global user.name "github-actions[bot]"
|
||||
git config --global user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Cherry pick commit
|
||||
id: cherry-pick
|
||||
run: |
|
||||
set -e
|
||||
|
||||
MERGE_COMMIT="${{ inputs.merge_commit_sha }}"
|
||||
TARGET_BRANCH="release-${{ inputs.version_number }}"
|
||||
|
||||
echo "🍒 Cherry-picking commit $MERGE_COMMIT to branch $TARGET_BRANCH"
|
||||
|
||||
# Check if target branch exists
|
||||
if ! git show-ref --verify --quiet "refs/remotes/origin/$TARGET_BRANCH"; then
|
||||
echo "❌ Target branch '$TARGET_BRANCH' does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create new branch for cherry-pick
|
||||
CHERRY_PICK_BRANCH="cherry-pick-${{ inputs.pr_number }}-to-${TARGET_BRANCH}"
|
||||
git checkout -b "$CHERRY_PICK_BRANCH" "origin/$TARGET_BRANCH"
|
||||
|
||||
# Perform cherry-pick
|
||||
if git cherry-pick -m 1 "$MERGE_COMMIT"; then
|
||||
echo "✅ Cherry-pick successful"
|
||||
|
||||
# Extract Signed-off-by from the cherry-pick commit
|
||||
SIGNOFF=$(git log -1 --pretty=format:"%B" | grep -E '^Signed-off-by:' || echo "")
|
||||
|
||||
# Push the new branch
|
||||
git push origin "$CHERRY_PICK_BRANCH"
|
||||
|
||||
# Save data for PR creation
|
||||
echo "branch_name=$CHERRY_PICK_BRANCH" >> "$GITHUB_OUTPUT"
|
||||
echo "signoff=$SIGNOFF" >> "$GITHUB_OUTPUT"
|
||||
echo "target_branch=$TARGET_BRANCH" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "❌ Cherry-pick failed due to conflicts"
|
||||
git cherry-pick --abort
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Create Pull Request
|
||||
run: |
|
||||
# Create cherry-pick PR
|
||||
gh pr create \
|
||||
--title "${{ inputs.pr_title }} (cherry-pick #${{ inputs.pr_number }} for ${{ inputs.version_number }})" \
|
||||
--body "Cherry-picked ${{ inputs.pr_title }} (#${{ inputs.pr_number }})
|
||||
|
||||
${{ steps.cherry-pick.outputs.signoff }}" \
|
||||
--base "${{ steps.cherry-pick.outputs.target_branch }}" \
|
||||
--head "${{ steps.cherry-pick.outputs.branch_name }}"
|
||||
|
||||
# Comment on original PR
|
||||
gh pr comment ${{ inputs.pr_number }} \
|
||||
--body "🍒 Cherry-pick PR created for ${{ inputs.version_number }}: #$(gh pr list --head ${{ steps.cherry-pick.outputs.branch_name }} --json number --jq '.[0].number')"
|
||||
env:
|
||||
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
|
||||
|
||||
- name: Comment on failure
|
||||
if: failure()
|
||||
run: |
|
||||
gh pr comment ${{ inputs.pr_number }} \
|
||||
--body "❌ Cherry-pick failed for ${{ inputs.version_number }}. Please check the workflow logs for details."
|
||||
env:
|
||||
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
|
||||
53
.github/workflows/cherry-pick.yml
vendored
Normal file
53
.github/workflows/cherry-pick.yml
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
name: Cherry Pick
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
branches:
|
||||
- master
|
||||
types: ["labeled", "closed"]
|
||||
|
||||
jobs:
|
||||
find-labels:
|
||||
name: Find Cherry Pick Labels
|
||||
if: |
|
||||
github.event.pull_request.merged == true && (
|
||||
(github.event.action == 'labeled' && startsWith(github.event.label.name, 'cherry-pick/')) ||
|
||||
(github.event.action == 'closed' && contains(toJSON(github.event.pull_request.labels.*.name), 'cherry-pick/'))
|
||||
)
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
labels: ${{ steps.extract-labels.outputs.labels }}
|
||||
steps:
|
||||
- name: Extract cherry-pick labels
|
||||
id: extract-labels
|
||||
run: |
|
||||
if [[ "${{ github.event.action }}" == "labeled" ]]; then
|
||||
# Label was just added - use it directly
|
||||
LABEL_NAME="${{ github.event.label.name }}"
|
||||
VERSION="${LABEL_NAME#cherry-pick/}"
|
||||
CHERRY_PICK_DATA='[{"label":"'$LABEL_NAME'","version":"'$VERSION'"}]'
|
||||
else
|
||||
# PR was closed - find all cherry-pick labels
|
||||
CHERRY_PICK_DATA=$(echo '${{ toJSON(github.event.pull_request.labels) }}' | jq -c '[.[] | select(.name | startswith("cherry-pick/")) | {label: .name, version: (.name | sub("cherry-pick/"; ""))}]')
|
||||
fi
|
||||
|
||||
echo "labels=$CHERRY_PICK_DATA" >> "$GITHUB_OUTPUT"
|
||||
echo "Found cherry-pick data: $CHERRY_PICK_DATA"
|
||||
|
||||
cherry-pick:
|
||||
name: Cherry Pick
|
||||
needs: find-labels
|
||||
if: needs.find-labels.outputs.labels != '[]'
|
||||
strategy:
|
||||
matrix:
|
||||
include: ${{ fromJSON(needs.find-labels.outputs.labels) }}
|
||||
fail-fast: false
|
||||
uses: ./.github/workflows/cherry-pick-single.yml
|
||||
with:
|
||||
merge_commit_sha: ${{ github.event.pull_request.merge_commit_sha }}
|
||||
version_number: ${{ matrix.version }}
|
||||
pr_number: ${{ github.event.pull_request.number }}
|
||||
pr_title: ${{ github.event.pull_request.title }}
|
||||
secrets:
|
||||
CHERRYPICK_APP_ID: ${{ vars.CHERRYPICK_APP_ID }}
|
||||
CHERRYPICK_APP_PRIVATE_KEY: ${{ secrets.CHERRYPICK_APP_PRIVATE_KEY }}
|
||||
50
.github/workflows/ci-build.yaml
vendored
50
.github/workflows/ci-build.yaml
vendored
@@ -14,7 +14,7 @@ on:
|
||||
env:
|
||||
# Golang version to use across CI steps
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.24.4'
|
||||
GOLANG_VERSION: '1.25.1'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
@@ -32,7 +32,7 @@ jobs:
|
||||
docs: ${{ steps.filter.outputs.docs_any_changed }}
|
||||
steps:
|
||||
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
|
||||
- uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
|
||||
id: filter
|
||||
with:
|
||||
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
|
||||
@@ -57,7 +57,7 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Download all Go modules
|
||||
@@ -78,11 +78,11 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -105,14 +105,14 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
|
||||
with:
|
||||
# renovate: datasource=go packageName=github.com/golangci/golangci-lint versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
|
||||
version: v2.1.6
|
||||
version: v2.4.0
|
||||
args: --verbose
|
||||
|
||||
test-go:
|
||||
@@ -133,7 +133,7 @@ jobs:
|
||||
- name: Create symlink in GOPATH
|
||||
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Install required packages
|
||||
@@ -153,7 +153,7 @@ jobs:
|
||||
run: |
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -197,7 +197,7 @@ jobs:
|
||||
- name: Create symlink in GOPATH
|
||||
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Install required packages
|
||||
@@ -217,7 +217,7 @@ jobs:
|
||||
run: |
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -253,7 +253,7 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: Create symlink in GOPATH
|
||||
@@ -305,13 +305,13 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- name: Setup NodeJS
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
|
||||
uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
|
||||
with:
|
||||
# renovate: datasource=node-version packageName=node versioning=node
|
||||
node-version: '22.9.0'
|
||||
node-version: '22.19.0'
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ui/node_modules
|
||||
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
|
||||
@@ -339,7 +339,7 @@ jobs:
|
||||
- uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- run: |
|
||||
sudo apt-get install shellcheck
|
||||
shellcheck -e SC2086 -e SC2046 -e SC2068 -e SC2206 -e SC2048 -e SC2059 -e SC2154 -e SC2034 -e SC2016 -e SC2128 -e SC1091 -e SC2207 $(find . -type f -name '*.sh') | tee sc.log
|
||||
shellcheck -e SC2059 -e SC2154 -e SC2034 -e SC2016 -e SC1091 $(find . -type f -name '*.sh' | grep -v './ui/node_modules') | tee sc.log
|
||||
test ! -s sc.log
|
||||
|
||||
analyze:
|
||||
@@ -360,7 +360,7 @@ jobs:
|
||||
fetch-depth: 0
|
||||
- name: Restore node dependency cache
|
||||
id: cache-dependencies
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ui/node_modules
|
||||
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
|
||||
@@ -368,12 +368,12 @@ jobs:
|
||||
run: |
|
||||
rm -rf ui/node_modules/argo-ui/node_modules
|
||||
- name: Get e2e code coverage
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
with:
|
||||
name: e2e-code-coverage
|
||||
path: e2e-code-coverage
|
||||
- name: Get unit test code coverage
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
with:
|
||||
name: test-results
|
||||
path: test-results
|
||||
@@ -385,7 +385,7 @@ jobs:
|
||||
run: |
|
||||
go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller,e2e-code-coverage/commit-server -o test-results/full-coverage.out
|
||||
- name: Upload code coverage information to codecov.io
|
||||
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
|
||||
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
|
||||
with:
|
||||
files: test-results/full-coverage.out
|
||||
fail_ci_if_error: true
|
||||
@@ -402,12 +402,12 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
|
||||
uses: SonarSource/sonarqube-scan-action@2500896589ef8f7247069a56136f8dc177c27ccf # v5.2.0
|
||||
uses: SonarSource/sonarqube-scan-action@1a6d90ebcb0e6a6b1d87e37ba693fe453195ae25 # v5.3.1
|
||||
if: env.sonar_secret != ''
|
||||
test-e2e:
|
||||
name: Run end-to-end tests
|
||||
if: ${{ needs.changes.outputs.backend == 'true' }}
|
||||
runs-on: ubuntu-22.04
|
||||
runs-on: ubuntu-latest-16-cores
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -449,7 +449,7 @@ jobs:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8410ad0602e1e429cee44a835ae9f77f654a6694 # v4.0.0
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
- name: GH actions workaround - Kill XSP4 process
|
||||
@@ -468,7 +468,7 @@ jobs:
|
||||
sudo chmod go-r $HOME/.kube/config
|
||||
kubectl version
|
||||
- name: Restore go build cache
|
||||
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
|
||||
@@ -496,7 +496,7 @@ jobs:
|
||||
run: |
|
||||
docker pull ghcr.io/dexidp/dex:v2.43.0
|
||||
docker pull argoproj/argo-cd-ci-builder:v1.0.0
|
||||
docker pull redis:7.2.7-alpine
|
||||
docker pull redis:8.2.1-alpine
|
||||
- name: Create target directory for binaries in the build-process
|
||||
run: |
|
||||
mkdir -p dist
|
||||
|
||||
2
.github/workflows/codeql.yml
vendored
2
.github/workflows/codeql.yml
vendored
@@ -33,7 +33,7 @@ jobs:
|
||||
|
||||
# Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
|
||||
12
.github/workflows/image-reuse.yaml
vendored
12
.github/workflows/image-reuse.yaml
vendored
@@ -67,16 +67,16 @@ jobs:
|
||||
if: ${{ github.ref_type != 'tag'}}
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ inputs.go-version }}
|
||||
cache: false
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
|
||||
uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
|
||||
|
||||
- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
- uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Setup tags for container image as a CSV type
|
||||
run: |
|
||||
@@ -103,7 +103,7 @@ jobs:
|
||||
echo 'EOF' >> $GITHUB_ENV
|
||||
|
||||
- name: Login to Quay.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.quay_username }}
|
||||
@@ -111,7 +111,7 @@ jobs:
|
||||
if: ${{ inputs.quay_image_name && inputs.push }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ secrets.ghcr_username }}
|
||||
@@ -119,7 +119,7 @@ jobs:
|
||||
if: ${{ inputs.ghcr_image_name && inputs.push }}
|
||||
|
||||
- name: Login to dockerhub Container Registry
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
username: ${{ secrets.docker_username }}
|
||||
password: ${{ secrets.docker_password }}
|
||||
|
||||
4
.github/workflows/image.yaml
vendored
4
.github/workflows/image.yaml
vendored
@@ -53,7 +53,7 @@ jobs:
|
||||
with:
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.24.4
|
||||
go-version: 1.25.1
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: false
|
||||
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.24.4
|
||||
go-version: 1.25.1
|
||||
platforms: ${{ needs.set-vars.outputs.platforms }}
|
||||
push: true
|
||||
secrets:
|
||||
|
||||
12
.github/workflows/release.yaml
vendored
12
.github/workflows/release.yaml
vendored
@@ -11,7 +11,7 @@ permissions: {}
|
||||
|
||||
env:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.24.4' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.25.1' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
@@ -25,7 +25,7 @@ jobs:
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.24.4
|
||||
go-version: 1.25.1
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
@@ -70,7 +70,7 @@ jobs:
|
||||
run: git fetch --force --tags
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
cache: false
|
||||
@@ -96,7 +96,7 @@ jobs:
|
||||
tool-cache: false
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
|
||||
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
|
||||
id: run-goreleaser
|
||||
with:
|
||||
version: latest
|
||||
@@ -153,7 +153,7 @@ jobs:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
cache: false
|
||||
@@ -198,7 +198,7 @@ jobs:
|
||||
echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Upload SBOM
|
||||
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
|
||||
uses: softprops/action-gh-release@6cbd405e2c4e67a21c47fa9e383d020e4e28b836 # v2.3.3
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
|
||||
32
.github/workflows/renovate.yaml
vendored
Normal file
32
.github/workflows/renovate.yaml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: Renovate
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 * * * *'
|
||||
workflow_dispatch: {}
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
renovate:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
steps:
|
||||
- name: Get token
|
||||
id: get_token
|
||||
uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
|
||||
with:
|
||||
app-id: ${{ vars.RENOVATE_APP_ID }}
|
||||
private-key: ${{ secrets.RENOVATE_APP_PRIVATE_KEY }}
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2
|
||||
|
||||
- name: Self-hosted Renovate
|
||||
uses: renovatebot/github-action@f8af9272cd94a4637c29f60dea8731afd3134473 #43.0.12
|
||||
with:
|
||||
configurationFile: .github/configs/renovate-config.js
|
||||
token: '${{ steps.get_token.outputs.token }}'
|
||||
env:
|
||||
LOG_LEVEL: 'debug'
|
||||
RENOVATE_REPOSITORIES: '${{ github.repository }}'
|
||||
@@ -58,7 +58,6 @@ linters:
|
||||
- commentedOutCode
|
||||
- deferInLoop
|
||||
- exitAfterDefer
|
||||
- exposedSyncMutex
|
||||
- hugeParam
|
||||
- importShadow
|
||||
- paramTypeCombine # Leave disabled, there are too many failures to be worth fixing.
|
||||
|
||||
@@ -21,7 +21,7 @@ builds:
|
||||
- -X github.com/argoproj/argo-cd/v3/common.gitCommit={{ .FullCommit }}
|
||||
- -X github.com/argoproj/argo-cd/v3/common.gitTreeState={{ .Env.GIT_TREE_STATE }}
|
||||
- -X github.com/argoproj/argo-cd/v3/common.kubectlVersion={{ .Env.KUBECTL_VERSION }}
|
||||
- '{{ if or (eq .Runtime.Goos "linux") (eq .Runtime.Goos "windows") }}-extldflags="-static"{{ end }}'
|
||||
- -extldflags="-static"
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
@@ -42,15 +42,6 @@ builds:
|
||||
goarch: ppc64le
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
overrides:
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- goos: darwin
|
||||
goarch: arm64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
|
||||
archives:
|
||||
- id: argocd-archive
|
||||
|
||||
@@ -32,6 +32,9 @@ packages:
|
||||
github.com/argoproj/argo-cd/v3/controller/cache:
|
||||
interfaces:
|
||||
LiveStateCache: {}
|
||||
github.com/argoproj/argo-cd/v3/controller/hydrator:
|
||||
interfaces:
|
||||
Dependencies: {}
|
||||
github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster:
|
||||
interfaces:
|
||||
ClusterServiceServer: {}
|
||||
@@ -66,6 +69,9 @@ packages:
|
||||
github.com/argoproj/argo-cd/v3/util/helm:
|
||||
interfaces:
|
||||
Client: {}
|
||||
github.com/argoproj/argo-cd/v3/util/oci:
|
||||
interfaces:
|
||||
Client: {}
|
||||
github.com/argoproj/argo-cd/v3/util/io:
|
||||
interfaces:
|
||||
TempPaths: {}
|
||||
|
||||
@@ -12,3 +12,8 @@
|
||||
/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
|
||||
/.goreleaser.yaml @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
|
||||
/sonar-project.properties @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
|
||||
|
||||
# CLI
|
||||
/cmd/argocd/** @argoproj/argocd-approvers @argoproj/argocd-approvers-cli
|
||||
/cmd/main.go @argoproj/argocd-approvers @argoproj/argocd-approvers-cli
|
||||
/docs/operator-manual/ @argoproj/argocd-approvers @argoproj/argocd-approvers-cli
|
||||
@@ -1,10 +1,10 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:24.04@sha256:80dd3c3b9c6cecb9f1667e9290b3bc61b78c2678c02cbdae5f0fea92cc6734ab
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:10bb10bb062de665d4dc3e0ea36715270ead632cfcb74d08ca2273712a0dfb42
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS builder
|
||||
FROM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 AS builder
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
@@ -85,7 +85,7 @@ WORKDIR /home/argocd
|
||||
####################################################################################################
|
||||
# Argo CD UI stage
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.0.0@sha256:e643c0b70dca9704dff42e12b17f5b719dbe4f95e6392fc2dfa0c5f02ea8044d AS argocd-ui
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.11.1@sha256:9a25b5a6f9a90218b73a62205f111e71de5e4289aee952b4dd7e86f7498f2544 AS argocd-ui
|
||||
|
||||
WORKDIR /src
|
||||
COPY ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
@@ -103,7 +103,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.24.4@sha256:db5d0afbfb4ab648af2393b92e87eaae9ad5e01132803d80caef91b5752d289c AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1 AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM docker.io/library/golang:1.24.1@sha256:c5adecdb7b3f8c5ca3c88648a861882849cc8b02fed68ece31e25de88ad13418
|
||||
FROM docker.io/library/golang:1.25.1@sha256:8305f5fa8ea63c7b5bc85bd223ccc62941f852318ebfbd22f53bbd0b358c07e1
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
|
||||
16
Makefile
16
Makefile
@@ -43,6 +43,17 @@ endif
|
||||
DOCKER_SRCDIR?=$(GOPATH)/src
|
||||
DOCKER_WORKDIR?=/go/src/github.com/argoproj/argo-cd
|
||||
|
||||
# Allows you to control which Docker network the test-util containers attach to.
|
||||
# This is particularly useful if you are running Kubernetes in Docker (e.g., k3d)
|
||||
# and want the test containers to reach the Kubernetes API via an already-existing Docker network.
|
||||
DOCKER_NETWORK ?= default
|
||||
|
||||
ifneq ($(DOCKER_NETWORK),default)
|
||||
DOCKER_NETWORK_ARG := --network $(DOCKER_NETWORK)
|
||||
else
|
||||
DOCKER_NETWORK_ARG :=
|
||||
endif
|
||||
|
||||
ARGOCD_PROCFILE?=Procfile
|
||||
|
||||
# pointing to python 3.7 to match https://github.com/argoproj/argo-cd/blob/master/.readthedocs.yml
|
||||
@@ -113,11 +124,11 @@ define run-in-test-server
|
||||
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
|
||||
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
|
||||
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
|
||||
-v /tmp:/tmp${VOLUME_MOUNT} \
|
||||
-w ${DOCKER_WORKDIR} \
|
||||
-p ${ARGOCD_E2E_APISERVER_PORT}:8080 \
|
||||
-p 4000:4000 \
|
||||
-p 5000:5000 \
|
||||
$(DOCKER_NETWORK_ARG)\
|
||||
$(PODMAN_ARGS) \
|
||||
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
|
||||
bash -c "$(1)"
|
||||
@@ -138,8 +149,8 @@ define run-in-test-client
|
||||
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
|
||||
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
|
||||
-v ${HOME}/.kube:/home/user/.kube${VOLUME_MOUNT} \
|
||||
-v /tmp:/tmp${VOLUME_MOUNT} \
|
||||
-w ${DOCKER_WORKDIR} \
|
||||
$(DOCKER_NETWORK_ARG)\
|
||||
$(PODMAN_ARGS) \
|
||||
$(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG) \
|
||||
bash -c "$(1)"
|
||||
@@ -604,6 +615,7 @@ install-test-tools-local:
|
||||
.PHONY: install-codegen-tools-local
|
||||
install-codegen-tools-local:
|
||||
./hack/install.sh codegen-tools
|
||||
./hack/install.sh codegen-go-tools
|
||||
|
||||
# Installs all tools required for running codegen (Go packages)
|
||||
.PHONY: install-go-tools-local
|
||||
|
||||
@@ -3,9 +3,9 @@ header:
|
||||
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
|
||||
last-updated: '2023-10-27'
|
||||
last-reviewed: '2023-10-27'
|
||||
commit-hash: 226a670fe6b3c6769ff6d18e6839298a58e4577d
|
||||
commit-hash: 320f46f06beaf75f9c406e3a47e2e09d36e2047a
|
||||
project-url: https://github.com/argoproj/argo-cd
|
||||
project-release: v3.1.0
|
||||
project-release: v3.2.0
|
||||
changelog: https://github.com/argoproj/argo-cd/releases
|
||||
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
|
||||
project-lifecycle:
|
||||
|
||||
11
Tiltfile
11
Tiltfile
@@ -10,6 +10,14 @@ cmd_button(
|
||||
text='make codegen-local',
|
||||
)
|
||||
|
||||
cmd_button(
|
||||
'make test-local',
|
||||
argv=['sh', '-c', 'make test-local'],
|
||||
location=location.NAV,
|
||||
icon_name='science',
|
||||
text='make test-local',
|
||||
)
|
||||
|
||||
# add ui button in web ui to run make codegen-local (top nav)
|
||||
cmd_button(
|
||||
'make cli-local',
|
||||
@@ -69,7 +77,7 @@ docker_build_with_restart(
|
||||
],
|
||||
platform=platform,
|
||||
live_update=[
|
||||
sync('.tilt-bin/argocd_linux_amd64', '/usr/local/bin/argocd'),
|
||||
sync('.tilt-bin/argocd_linux', '/usr/local/bin/argocd'),
|
||||
],
|
||||
only=[
|
||||
'.tilt-bin',
|
||||
@@ -260,6 +268,7 @@ local_resource(
|
||||
'make lint-local',
|
||||
deps = code_deps,
|
||||
allow_parallel=True,
|
||||
resource_deps=['vendor']
|
||||
)
|
||||
|
||||
local_resource(
|
||||
|
||||
10
USERS.md
10
USERS.md
@@ -5,8 +5,10 @@ PR with your organization name if you are using Argo CD.
|
||||
|
||||
Currently, the following organizations are **officially** using Argo CD:
|
||||
|
||||
1. [100ms](https://www.100ms.ai/)
|
||||
1. [127Labs](https://127labs.com/)
|
||||
1. [3Rein](https://www.3rein.com/)
|
||||
1. [42 School](https://42.fr/)
|
||||
1. [4data](https://4data.ch/)
|
||||
1. [7shifts](https://www.7shifts.com/)
|
||||
1. [Adevinta](https://www.adevinta.com/)
|
||||
@@ -40,6 +42,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Back Market](https://www.backmarket.com)
|
||||
1. [Bajaj Finserv Health Ltd.](https://www.bajajfinservhealth.in)
|
||||
1. [Baloise](https://www.baloise.com)
|
||||
1. [Batumbu](https://batumbu.id)
|
||||
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
|
||||
1. [Beat](https://thebeat.co/en/)
|
||||
1. [Beez Innovation Labs](https://www.beezlabs.com/)
|
||||
@@ -71,6 +74,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Chime](https://www.chime.com)
|
||||
1. [Chronicle Labs](https://chroniclelabs.org)
|
||||
1. [Cisco ET&I](https://eti.cisco.com/)
|
||||
1. [Close](https://www.close.com/)
|
||||
1. [Cloud Posse](https://www.cloudposse.com/)
|
||||
1. [Cloud Scale](https://cloudscaleinc.com/)
|
||||
1. [CloudScript](https://www.cloudscript.com.br/)
|
||||
@@ -160,6 +164,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Hiya](https://hiya.com)
|
||||
1. [Honestbank](https://honestbank.com)
|
||||
1. [Hostinger](https://www.hostinger.com)
|
||||
1. [Hotjar](https://www.hotjar.com)
|
||||
1. [IABAI](https://www.iab.ai)
|
||||
1. [IBM](https://www.ibm.com/)
|
||||
1. [Ibotta](https://home.ibotta.com)
|
||||
@@ -173,6 +178,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Info Support](https://www.infosupport.com/)
|
||||
1. [InsideBoard](https://www.insideboard.com)
|
||||
1. [Instruqt](https://www.instruqt.com)
|
||||
1. [Intel](https://www.intel.com)
|
||||
1. [Intuit](https://www.intuit.com/)
|
||||
1. [Jellysmack](https://www.jellysmack.com)
|
||||
1. [Joblift](https://joblift.com/)
|
||||
@@ -320,7 +326,10 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [SEEK](https://seek.com.au)
|
||||
1. [SEKAI](https://www.sekai.io/)
|
||||
1. [Semgrep](https://semgrep.com)
|
||||
1. [Seznam.cz](https://o-seznam.cz/)
|
||||
1. [Shield](https://shield.com)
|
||||
1. [Shipfox](https://www.shipfox.io)
|
||||
1. [Shock Media](https://www.shockmedia.nl)
|
||||
1. [SI Analytics](https://si-analytics.ai)
|
||||
1. [Sidewalk Entertainment](https://sidewalkplay.com/)
|
||||
1. [Skit](https://skit.ai/)
|
||||
@@ -333,6 +342,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Snapp](https://snapp.ir/)
|
||||
1. [Snyk](https://snyk.io/)
|
||||
1. [Softway Medical](https://www.softwaymedical.fr/)
|
||||
1. [Sophotech](https://sopho.tech)
|
||||
1. [South China Morning Post (SCMP)](https://www.scmp.com/)
|
||||
1. [Speee](https://speee.jp/)
|
||||
1. [Spendesk](https://spendesk.com/)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -79,14 +79,10 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
return nil, fmt.Errorf("error getting cluster secrets: %w", err)
|
||||
}
|
||||
|
||||
res := []map[string]any{}
|
||||
paramHolder := ¶mHolder{isFlatMode: appSetGenerator.Clusters.FlatList}
|
||||
logCtx.Debugf("Using flat mode = %t for cluster generator", paramHolder.isFlatMode)
|
||||
|
||||
secretsFound := []corev1.Secret{}
|
||||
|
||||
isFlatMode := appSetGenerator.Clusters.FlatList
|
||||
logCtx.Debugf("Using flat mode = %t for cluster generator", isFlatMode)
|
||||
clustersParams := make([]map[string]any, 0)
|
||||
|
||||
for _, cluster := range clustersFromArgoCD {
|
||||
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
|
||||
// handled by the next step.
|
||||
@@ -105,72 +101,80 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
|
||||
}
|
||||
|
||||
if isFlatMode {
|
||||
clustersParams = append(clustersParams, params)
|
||||
} else {
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
paramHolder.append(params)
|
||||
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
}
|
||||
}
|
||||
|
||||
// For each matching cluster secret (non-local clusters only)
|
||||
for _, cluster := range secretsFound {
|
||||
params := map[string]any{}
|
||||
|
||||
params["name"] = string(cluster.Data["name"])
|
||||
params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"]))
|
||||
params["server"] = string(cluster.Data["server"])
|
||||
|
||||
project, ok := cluster.Data["project"]
|
||||
if ok {
|
||||
params["project"] = string(project)
|
||||
} else {
|
||||
params["project"] = ""
|
||||
}
|
||||
|
||||
if appSet.Spec.GoTemplate {
|
||||
meta := map[string]any{}
|
||||
|
||||
if len(cluster.Annotations) > 0 {
|
||||
meta["annotations"] = cluster.Annotations
|
||||
}
|
||||
if len(cluster.Labels) > 0 {
|
||||
meta["labels"] = cluster.Labels
|
||||
}
|
||||
|
||||
params["metadata"] = meta
|
||||
} else {
|
||||
for key, value := range cluster.Annotations {
|
||||
params["metadata.annotations."+key] = value
|
||||
}
|
||||
|
||||
for key, value := range cluster.Labels {
|
||||
params["metadata.labels."+key] = value
|
||||
}
|
||||
}
|
||||
params := g.getClusterParameters(cluster, appSet)
|
||||
|
||||
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error appending templated values for cluster: %w", err)
|
||||
}
|
||||
|
||||
if isFlatMode {
|
||||
clustersParams = append(clustersParams, params)
|
||||
} else {
|
||||
res = append(res, params)
|
||||
}
|
||||
|
||||
paramHolder.append(params)
|
||||
logCtx.WithField("cluster", cluster.Name).Debug("matched cluster secret")
|
||||
}
|
||||
|
||||
if isFlatMode {
|
||||
res = append(res, map[string]any{
|
||||
"clusters": clustersParams,
|
||||
})
|
||||
return paramHolder.consolidate(), nil
|
||||
}
|
||||
|
||||
type paramHolder struct {
|
||||
isFlatMode bool
|
||||
params []map[string]any
|
||||
}
|
||||
|
||||
func (p *paramHolder) append(params map[string]any) {
|
||||
p.params = append(p.params, params)
|
||||
}
|
||||
|
||||
func (p *paramHolder) consolidate() []map[string]any {
|
||||
if p.isFlatMode {
|
||||
p.params = []map[string]any{
|
||||
{"clusters": p.params},
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
return p.params
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) getClusterParameters(cluster corev1.Secret, appSet *argoappsetv1alpha1.ApplicationSet) map[string]any {
|
||||
params := map[string]any{}
|
||||
|
||||
params["name"] = string(cluster.Data["name"])
|
||||
params["nameNormalized"] = utils.SanitizeName(string(cluster.Data["name"]))
|
||||
params["server"] = string(cluster.Data["server"])
|
||||
|
||||
project, ok := cluster.Data["project"]
|
||||
if ok {
|
||||
params["project"] = string(project)
|
||||
} else {
|
||||
params["project"] = ""
|
||||
}
|
||||
|
||||
if appSet.Spec.GoTemplate {
|
||||
meta := map[string]any{}
|
||||
|
||||
if len(cluster.Annotations) > 0 {
|
||||
meta["annotations"] = cluster.Annotations
|
||||
}
|
||||
if len(cluster.Labels) > 0 {
|
||||
meta["labels"] = cluster.Labels
|
||||
}
|
||||
|
||||
params["metadata"] = meta
|
||||
} else {
|
||||
for key, value := range cluster.Annotations {
|
||||
params["metadata.annotations."+key] = value
|
||||
}
|
||||
|
||||
for key, value := range cluster.Labels {
|
||||
params["metadata.labels."+key] = value
|
||||
}
|
||||
}
|
||||
return params
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) getSecretsByClusterName(log *log.Entry, appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) (map[string]corev1.Secret, error) {
|
||||
|
||||
@@ -29,10 +29,10 @@ type GitGenerator struct {
|
||||
}
|
||||
|
||||
// NewGitGenerator creates a new instance of Git Generator
|
||||
func NewGitGenerator(repos services.Repos, namespace string) Generator {
|
||||
func NewGitGenerator(repos services.Repos, controllerNamespace string) Generator {
|
||||
g := &GitGenerator{
|
||||
repos: repos,
|
||||
namespace: namespace,
|
||||
namespace: controllerNamespace,
|
||||
}
|
||||
|
||||
return g
|
||||
@@ -78,11 +78,11 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
|
||||
if !strings.Contains(appSet.Spec.Template.Spec.Project, "{{") {
|
||||
project := appSet.Spec.Template.Spec.Project
|
||||
appProject := &argoprojiov1alpha1.AppProject{}
|
||||
namespace := g.namespace
|
||||
if namespace == "" {
|
||||
namespace = appSet.Namespace
|
||||
controllerNamespace := g.namespace
|
||||
if controllerNamespace == "" {
|
||||
controllerNamespace = appSet.Namespace
|
||||
}
|
||||
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: namespace}, appProject); err != nil {
|
||||
if err := client.Get(context.TODO(), types.NamespacedName{Name: project, Namespace: controllerNamespace}, appProject); err != nil {
|
||||
return nil, fmt.Errorf("error getting project %s: %w", project, err)
|
||||
}
|
||||
// we need to verify the signature on the Git revision if GPG is enabled
|
||||
@@ -222,19 +222,18 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
 func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, values map[string]string, useGoTemplate bool, goTemplateOptions []string, pathParamPrefix string) ([]map[string]any, error) {
 	objectsFound := []map[string]any{}

-	// First, we attempt to parse as an array
-	err := yaml.Unmarshal(fileContent, &objectsFound)
-	if err != nil {
-		// If unable to parse as an array, attempt to parse as a single object
-		singleObj := make(map[string]any)
-		err = yaml.Unmarshal(fileContent, &singleObj)
+	// First, we attempt to parse as a single object.
+	// This will also succeed for empty files.
+	singleObj := map[string]any{}
+	err := yaml.Unmarshal(fileContent, &singleObj)
+	if err == nil {
+		objectsFound = append(objectsFound, singleObj)
+	} else {
+		// If unable to parse as an object, try to parse as an array
+		err = yaml.Unmarshal(fileContent, &objectsFound)
 		if err != nil {
 			return nil, fmt.Errorf("unable to parse file: %w", err)
 		}
-		objectsFound = append(objectsFound, singleObj)
-	} else if len(objectsFound) == 0 {
-		// If file is valid but empty, add a default empty item
-		objectsFound = append(objectsFound, map[string]any{})
 	}

 	res := []map[string]any{}
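The reordering above matters because YAML unmarshalling is permissive about empty input. A minimal sketch of the behavior, assuming sigs.k8s.io/yaml (the usual YAML-to-JSON wrapper in this codebase) as the parser:

package main

import (
    "fmt"

    "sigs.k8s.io/yaml"
)

func main() {
    // A single YAML object parses into a map ...
    obj := map[string]any{}
    fmt.Println(yaml.Unmarshal([]byte("cluster:\n  name: production\n"), &obj) == nil) // true

    // ... and so does an empty file, which is why parsing the single-object
    // form first lets empty files fall through as one empty parameter set.
    obj = map[string]any{}
    fmt.Println(yaml.Unmarshal([]byte(""), &obj) == nil) // true

    // A YAML array, on the other hand, only parses into a slice of maps.
    list := []map[string]any{}
    fmt.Println(yaml.Unmarshal([]byte("- cluster: a\n- cluster: b\n"), &list) == nil) // true
    obj = map[string]any{}
    fmt.Println(yaml.Unmarshal([]byte("- cluster: a\n"), &obj) == nil) // false: an array cannot unmarshal into a map
}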
@@ -825,7 +825,7 @@ func TestGitGenerateParamsFromFiles(t *testing.T) {
 			},
 			repoPathsError: nil,
 			expected:       []map[string]any{},
-			expectedError:  errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
+			expectedError:  errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []map[string]interface {}"),
 		},
 		{
 			name: "test JSON array",
@@ -982,6 +982,16 @@ cluster:
 			},
 			expectedError: nil,
 		},
+		{
+			name:  "test empty YAML array",
+			files: []v1alpha1.GitFileGeneratorItem{{Path: "**/config.yaml"}},
+			repoFileContents: map[string][]byte{
+				"cluster-config/production/config.yaml": []byte(`[]`),
+			},
+			repoPathsError: nil,
+			expected:       []map[string]any{},
+			expectedError:  nil,
+		},
 	}

 	for _, testCase := range cases {
@@ -2060,7 +2070,7 @@ func TestGitGenerateParamsFromFilesGoTemplate(t *testing.T) {
 			},
 			repoPathsError: nil,
 			expected:       []map[string]any{},
-			expectedError:  errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
+			expectedError:  errors.New("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []map[string]interface {}"),
 		},
 		{
 			name: "test JSON array",
@@ -11,6 +11,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	"github.com/gosimple/slug"
 	log "github.com/sirupsen/logrus"

 	"github.com/argoproj/argo-cd/v3/applicationset/services"
 	pullrequest "github.com/argoproj/argo-cd/v3/applicationset/services/pull_request"
@@ -18,8 +19,6 @@ import (
 	argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
 )

 var _ Generator = (*PullRequestGenerator)(nil)

 const (
 	DefaultPullRequestRequeueAfter = 30 * time.Minute
 )
@@ -49,6 +48,10 @@ func (g *PullRequestGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alph
 	return DefaultPullRequestRequeueAfter
 }

+func (g *PullRequestGenerator) GetContinueOnRepoNotFoundError(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) bool {
+	return appSetGenerator.PullRequest.ContinueOnRepoNotFoundError
+}

 func (g *PullRequestGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
 	return &appSetGenerator.PullRequest.Template
 }
@@ -69,10 +72,15 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
 	}

 	pulls, err := pullrequest.ListPullRequests(ctx, svc, appSetGenerator.PullRequest.Filters)
+	params := make([]map[string]any, 0, len(pulls))
 	if err != nil {
+		if pullrequest.IsRepositoryNotFoundError(err) && g.GetContinueOnRepoNotFoundError(appSetGenerator) {
+			log.WithError(err).WithField("generator", g).
+				Warn("Skipping params generation for this repository since it was not found.")
+			return params, nil
+		}
 		return nil, fmt.Errorf("error listing repos: %w", err)
 	}
-	params := make([]map[string]any, 0, len(pulls))

 	// In order to follow the DNS label standard as defined in RFC 1123,
 	// we need to limit the 'branch' to 50 to give room to append/suffix-ing it
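The RFC 1123 comment above refers to keeping the branch slug short enough that generated names stay valid DNS labels (63 characters) even after suffixes are appended. The truncation helper itself is not part of this excerpt; a rough sketch of the idea, using the gosimple/slug package imported above (shortSlug is a hypothetical name):

package main

import (
    "fmt"

    "github.com/gosimple/slug"
)

// shortSlug is a hypothetical helper illustrating the comment: slugify the
// branch name and cap it so that suffixes can still fit within a DNS label.
func shortSlug(branch string, limit int) string {
    s := slug.Make(branch)
    if len(s) > limit {
        s = s[:limit]
    }
    return s
}

func main() {
    fmt.Println(shortSlug("feature/Some_Very-Long-Branch-Name-that-would-overflow-a-dns-label-once-suffixed", 50))
}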
@@ -111,15 +119,15 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
 			"author": pull.Author,
 		}

-		err := appendTemplatedValues(appSetGenerator.PullRequest.Values, paramMap, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
-		if err != nil {
-			return nil, fmt.Errorf("failed to append templated values: %w", err)
-		}
-
 		// PR labels will only be supported for Go Template appsets, since fasttemplate will be deprecated.
 		if applicationSetInfo != nil && applicationSetInfo.Spec.GoTemplate {
 			paramMap["labels"] = pull.Labels
 		}

+		err := appendTemplatedValues(appSetGenerator.PullRequest.Values, paramMap, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
+		if err != nil {
+			return nil, fmt.Errorf("failed to append templated values: %w", err)
+		}
 		params = append(params, paramMap)
 	}
 	return params, nil
@@ -16,11 +16,12 @@ import (
 func TestPullRequestGithubGenerateParams(t *testing.T) {
 	ctx := t.Context()
 	cases := []struct {
-		selectFunc     func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
-		values         map[string]string
-		expected       []map[string]any
-		expectedErr    error
-		applicationSet argoprojiov1alpha1.ApplicationSet
+		selectFunc                  func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
+		values                      map[string]string
+		expected                    []map[string]any
+		expectedErr                 error
+		applicationSet              argoprojiov1alpha1.ApplicationSet
+		continueOnRepoNotFoundError bool
 	}{
 		{
 			selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
@@ -171,6 +172,30 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
 			expected:    nil,
 			expectedErr: errors.New("error listing repos: fake error"),
 		},
+		{
+			selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
+				return pullrequest.NewFakeService(
+					ctx,
+					nil,
+					pullrequest.NewRepositoryNotFoundError(errors.New("repository not found")),
+				)
+			},
+			expected:                    []map[string]any{},
+			expectedErr:                 nil,
+			continueOnRepoNotFoundError: true,
+		},
+		{
+			selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
+				return pullrequest.NewFakeService(
+					ctx,
+					nil,
+					pullrequest.NewRepositoryNotFoundError(errors.New("repository not found")),
+				)
+			},
+			expected:                    nil,
+			expectedErr:                 errors.New("error listing repos: repository not found"),
+			continueOnRepoNotFoundError: false,
+		},
 		{
 			selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
 				return pullrequest.NewFakeService(
@@ -252,6 +277,51 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
 				},
 			},
 		},
+		{
+			selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
+				return pullrequest.NewFakeService(
+					ctx,
+					[]*pullrequest.PullRequest{
+						{
+							Number:       1,
+							Title:        "title1",
+							Branch:       "my_branch",
+							TargetBranch: "master",
+							HeadSHA:      "abcd",
+							Author:       "testName",
+							Labels:       []string{"preview", "preview:team1"},
+						},
+					},
+					nil,
+				)
+			},
+			values: map[string]string{
+				"preview_env": "{{ regexFind \"(team1|team2)\" (.labels | join \",\") }}",
+			},
+			expected: []map[string]any{
+				{
+					"number":             "1",
+					"title":              "title1",
+					"branch":             "my_branch",
+					"branch_slug":        "my-branch",
+					"target_branch":      "master",
+					"target_branch_slug": "master",
+					"head_sha":           "abcd",
+					"head_short_sha":     "abcd",
+					"head_short_sha_7":   "abcd",
+					"author":             "testName",
+					"labels":             []string{"preview", "preview:team1"},
+					"values":             map[string]string{"preview_env": "team1"},
+				},
+			},
+			expectedErr: nil,
+			applicationSet: argoprojiov1alpha1.ApplicationSet{
+				Spec: argoprojiov1alpha1.ApplicationSetSpec{
+					// Application set is using Go templates.
+					GoTemplate: true,
+				},
+			},
+		},
 	}

 	for _, c := range cases {
@@ -260,7 +330,8 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
 		}
 		generatorConfig := argoprojiov1alpha1.ApplicationSetGenerator{
 			PullRequest: &argoprojiov1alpha1.PullRequestGenerator{
-				Values: c.values,
+				Values:                      c.values,
+				ContinueOnRepoNotFoundError: c.continueOnRepoNotFoundError,
 			},
 		}

@@ -10,15 +10,15 @@ import (
 	"github.com/argoproj/argo-cd/v3/applicationset/services"
 )

-func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, namespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
+func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
 	terminalGenerators := map[string]Generator{
 		"List": NewListGenerator(),
-		"Clusters": NewClusterGenerator(ctx, c, k8sClient, namespace),
-		"Git": NewGitGenerator(argoCDService, namespace),
+		"Clusters": NewClusterGenerator(ctx, c, k8sClient, controllerNamespace),
+		"Git": NewGitGenerator(argoCDService, controllerNamespace),
 		"SCMProvider": NewSCMProviderGenerator(c, scmConfig),
-		"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace),
+		"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace),
 		"PullRequest": NewPullRequestGenerator(c, scmConfig),
-		"Plugin": NewPluginGenerator(c, namespace),
+		"Plugin": NewPluginGenerator(c, controllerNamespace),
 	}

 	nestedGenerators := map[string]Generator{
@@ -10,7 +10,10 @@ import (
 	"github.com/microsoft/azure-devops-go-api/azuredevops/v7/git"
 )

-const AZURE_DEVOPS_DEFAULT_URL = "https://dev.azure.com"
+const (
+	AZURE_DEVOPS_DEFAULT_URL             = "https://dev.azure.com"
+	AZURE_DEVOPS_PROJECT_NOT_FOUND_ERROR = "The following project does not exist"
+)

 type AzureDevOpsClientFactory interface {
 	// Returns an Azure Devops Client interface.
@@ -70,13 +73,22 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
 		SearchCriteria: &git.GitPullRequestSearchCriteria{},
 	}

+	pullRequests := []*PullRequest{}

 	azurePullRequests, err := client.GetPullRequestsByProject(ctx, args)
 	if err != nil {
 		// A standard Http 404 error is not returned for Azure DevOps,
 		// so checking the error message for a specific pattern.
 		// NOTE: Since the repos are filtered later, only existence of the project
 		// is relevant for AzureDevOps
 		if strings.Contains(err.Error(), AZURE_DEVOPS_PROJECT_NOT_FOUND_ERROR) {
 			// return a custom error indicating that the repository is not found,
 			// but also return the empty result since the decision to continue or not in this case is made by the caller
 			return pullRequests, NewRepositoryNotFoundError(err)
 		}
 		return nil, fmt.Errorf("failed to get pull requests by project: %w", err)
 	}

-	pullRequests := []*PullRequest{}

 	for _, pr := range *azurePullRequests {
 		if pr.Repository == nil ||
 			pr.Repository.Name == nil ||
@@ -2,6 +2,7 @@ package pull_request

 import (
 	"context"
 	"errors"
 	"testing"

 	"github.com/microsoft/azure-devops-go-api/azuredevops/v7/core"
@@ -235,3 +236,36 @@ func TestBuildURL(t *testing.T) {
 		})
 	}
 }

func TestAzureDevOpsListReturnsRepositoryNotFoundError(t *testing.T) {
    args := git.GetPullRequestsByProjectArgs{
        Project:        createStringPtr("nonexistent"),
        SearchCriteria: &git.GitPullRequestSearchCriteria{},
    }

    pullRequestMock := []git.GitPullRequest{}

    gitClientMock := azureMock.Client{}
    clientFactoryMock := &AzureClientFactoryMock{mock: &mock.Mock{}}
    clientFactoryMock.mock.On("GetClient", mock.Anything).Return(&gitClientMock, nil)

    // Mock GetPullRequestsByProject to return a project-not-found error
    gitClientMock.On("GetPullRequestsByProject", t.Context(), args).Return(&pullRequestMock,
        errors.New("The following project does not exist:"))

    provider := AzureDevOpsService{
        clientFactory: clientFactoryMock,
        project:       "nonexistent",
        repo:          "nonexistent",
        labels:        nil,
    }

    prs, err := provider.List(t.Context())

    // Should return empty pull requests list
    assert.Empty(t, prs)

    // Should return RepositoryNotFoundError
    require.Error(t, err)
    assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}
@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"net/url"
 	"strings"

 	"github.com/ktrysmt/go-bitbucket"
 )
@@ -117,8 +118,17 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
 		RepoSlug: b.repositorySlug,
 	}

+	pullRequests := []*PullRequest{}

 	response, err := b.client.Repositories.PullRequests.Gets(opts)
 	if err != nil {
 		// A standard Http 404 error is not returned for Bitbucket Cloud,
 		// so checking the error message for a specific pattern
 		if strings.Contains(err.Error(), "404 Not Found") {
 			// return a custom error indicating that the repository is not found,
 			// but also return the empty result since the decision to continue or not in this case is made by the caller
 			return pullRequests, NewRepositoryNotFoundError(err)
 		}
 		return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", b.owner, b.repositorySlug, err)
 	}

@@ -142,7 +152,6 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
 		return nil, fmt.Errorf("error unmarshalling json to type '[]BitbucketCloudPullRequest': %w", err)
 	}

-	pullRequests := []*PullRequest{}
 	for _, pull := range pulls {
 		pullRequests = append(pullRequests, &PullRequest{
 			Number: pull.ID,
@@ -492,3 +492,29 @@ func TestListPullRequestBranchMatchCloud(t *testing.T) {
 		TargetBranch: "branch-200",
 	}, *pullRequests[0])
 }

func TestBitbucketCloudListReturnsRepositoryNotFoundError(t *testing.T) {
    mux := http.NewServeMux()
    server := httptest.NewServer(mux)
    defer server.Close()

    path := "/repositories/nonexistent/nonexistent/pullrequests/"

    mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
        // Return 404 status to simulate repository not found
        w.WriteHeader(http.StatusNotFound)
        _, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
    })

    svc, err := NewBitbucketCloudServiceNoAuth(server.URL, "nonexistent", "nonexistent")
    require.NoError(t, err)

    prs, err := svc.List(t.Context())

    // Should return empty pull requests list
    assert.Empty(t, prs)

    // Should return RepositoryNotFoundError
    require.Error(t, err)
    assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}
@@ -8,7 +8,7 @@ import (
 	bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
 	log "github.com/sirupsen/logrus"

-	"github.com/argoproj/argo-cd/v3/applicationset/utils"
+	"github.com/argoproj/argo-cd/v3/applicationset/services"
 )

 type BitbucketService struct {
@@ -49,15 +49,10 @@ func NewBitbucketServiceNoAuth(ctx context.Context, url, projectKey, repositoryS
 }

 func newBitbucketService(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey, repositorySlug string, scmRootCAPath string, insecure bool, caCerts []byte) (PullRequestService, error) {
-	bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath)
-	tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
-	bitbucketConfig.HTTPClient = &http.Client{Transport: &http.Transport{
-		TLSClientConfig: tlsConfig,
-	}}
-	bitbucketClient := bitbucketv1.NewAPIClient(ctx, bitbucketConfig)
+	bbClient := services.SetupBitbucketClient(ctx, bitbucketConfig, scmRootCAPath, insecure, caCerts)

 	return &BitbucketService{
-		client: bitbucketClient,
+		client: bbClient,
 		projectKey: projectKey,
 		repositorySlug: repositorySlug,
 	}, nil
@@ -72,6 +67,11 @@ func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) {
 	for {
 		response, err := b.client.DefaultApi.GetPullRequestsPage(b.projectKey, b.repositorySlug, paged)
 		if err != nil {
+			if response != nil && response.Response != nil && response.StatusCode == http.StatusNotFound {
+				// return a custom error indicating that the repository is not found,
+				// but also return the empty result since the decision to continue or not in this case is made by the caller
+				return pullRequests, NewRepositoryNotFoundError(err)
+			}
 			return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", b.projectKey, b.repositorySlug, err)
 		}
 		pulls, err := bitbucketv1.GetPullRequestsResponse(response)
@@ -510,3 +510,29 @@ func TestListPullRequestBranchMatch(t *testing.T) {
 	})
 	require.Error(t, err)
 }

func TestBitbucketServerListReturnsRepositoryNotFoundError(t *testing.T) {
    mux := http.NewServeMux()
    server := httptest.NewServer(mux)
    defer server.Close()

    path := "/rest/api/1.0/projects/nonexistent/repos/nonexistent/pull-requests?limit=100"

    mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
        // Return 404 status to simulate repository not found
        w.WriteHeader(http.StatusNotFound)
        _, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
    })

    svc, err := NewBitbucketServiceNoAuth(t.Context(), server.URL, "nonexistent", "nonexistent", "", false, nil)
    require.NoError(t, err)

    prs, err := svc.List(t.Context())

    // Should return empty pull requests list
    assert.Empty(t, prs)

    // Should return RepositoryNotFoundError
    require.Error(t, err)
    assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}
applicationset/services/pull_request/errors.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package pull_request

import "errors"

// RepositoryNotFoundError represents an error when a repository is not found by a pull request provider
type RepositoryNotFoundError struct {
    causingError error
}

func (e *RepositoryNotFoundError) Error() string {
    return e.causingError.Error()
}

// NewRepositoryNotFoundError creates a new repository not found error
func NewRepositoryNotFoundError(err error) error {
    return &RepositoryNotFoundError{causingError: err}
}

// IsRepositoryNotFoundError checks if the given error is a repository not found error
func IsRepositoryNotFoundError(err error) bool {
    var repoErr *RepositoryNotFoundError
    return errors.As(err, &repoErr)
}
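Because IsRepositoryNotFoundError relies on errors.As, detection also works when a provider error is wrapped further up the call chain with %w. A small self-contained sketch (it redeclares the type locally purely so the snippet compiles on its own):

package main

import (
    "errors"
    "fmt"
)

// Local copy of the error type above, for illustration only.
type RepositoryNotFoundError struct{ causingError error }

func (e *RepositoryNotFoundError) Error() string { return e.causingError.Error() }

func IsRepositoryNotFoundError(err error) bool {
    var repoErr *RepositoryNotFoundError
    return errors.As(err, &repoErr)
}

func main() {
    base := &RepositoryNotFoundError{causingError: errors.New("repository not found")}

    // Wrapping with %w keeps the error chain intact, so errors.As still finds it.
    wrapped := fmt.Errorf("error listing repos: %w", base)
    fmt.Println(IsRepositoryNotFoundError(wrapped)) // true

    // Wrapping by string concatenation breaks the chain (as the tests below note).
    flattened := errors.New("error listing repos: " + base.Error())
    fmt.Println(IsRepositoryNotFoundError(flattened)) // false
}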
applicationset/services/pull_request/errors_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package pull_request

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestRepositoryNotFoundError(t *testing.T) {
    t.Run("NewRepositoryNotFoundError creates correct error type", func(t *testing.T) {
        originalErr := errors.New("repository does not exist")
        repoNotFoundErr := NewRepositoryNotFoundError(originalErr)

        require.Error(t, repoNotFoundErr)
        assert.Equal(t, "repository does not exist", repoNotFoundErr.Error())
    })

    t.Run("IsRepositoryNotFoundError identifies RepositoryNotFoundError", func(t *testing.T) {
        originalErr := errors.New("repository does not exist")
        repoNotFoundErr := NewRepositoryNotFoundError(originalErr)

        assert.True(t, IsRepositoryNotFoundError(repoNotFoundErr))
    })

    t.Run("IsRepositoryNotFoundError returns false for regular errors", func(t *testing.T) {
        regularErr := errors.New("some other error")

        assert.False(t, IsRepositoryNotFoundError(regularErr))
    })

    t.Run("IsRepositoryNotFoundError returns false for nil error", func(t *testing.T) {
        assert.False(t, IsRepositoryNotFoundError(nil))
    })

    t.Run("IsRepositoryNotFoundError works with wrapped errors", func(t *testing.T) {
        originalErr := errors.New("repository does not exist")
        repoNotFoundErr := NewRepositoryNotFoundError(originalErr)
        wrappedErr := errors.New("wrapped: " + repoNotFoundErr.Error())

        // Direct RepositoryNotFoundError should be identified
        assert.True(t, IsRepositoryNotFoundError(repoNotFoundErr))

        // Wrapped string error should not be identified (this is expected behavior)
        assert.False(t, IsRepositoryNotFoundError(wrappedErr))
    })
}
@@ -52,11 +52,17 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
 		State: gitea.StateOpen,
 	}
 	g.client.SetContext(ctx)
-	prs, _, err := g.client.ListRepoPullRequests(g.owner, g.repo, opts)
+	list := []*PullRequest{}
+	prs, resp, err := g.client.ListRepoPullRequests(g.owner, g.repo, opts)
 	if err != nil {
+		if resp != nil && resp.StatusCode == http.StatusNotFound {
+			// return a custom error indicating that the repository is not found,
+			// but also returning the empty result since the decision to continue or not in this case is made by the caller
+			return list, NewRepositoryNotFoundError(err)
+		}
 		return nil, err
 	}
-	list := []*PullRequest{}

 	for _, pr := range prs {
 		if !giteaContainLabels(g.labels, pr.Labels) {
 			continue
@@ -339,3 +339,35 @@ func TestGetGiteaPRLabelNames(t *testing.T) {
 		})
 	}
 }

func TestGiteaListReturnsRepositoryNotFoundError(t *testing.T) {
    mux := http.NewServeMux()
    server := httptest.NewServer(mux)
    defer server.Close()

    // Handle version endpoint that Gitea client calls first
    mux.HandleFunc("/api/v1/version", func(w http.ResponseWriter, _ *http.Request) {
        w.Header().Set("Content-Type", "application/json")
        _, _ = w.Write([]byte(`{"version":"1.17.0+dev-452-g1f0541780"}`))
    })

    path := "/api/v1/repos/nonexistent/nonexistent/pulls?limit=0&page=1&state=open"

    mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
        // Return 404 status to simulate repository not found
        w.WriteHeader(http.StatusNotFound)
        _, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
    })

    svc, err := NewGiteaService("", server.URL, "nonexistent", "nonexistent", []string{}, false)
    require.NoError(t, err)

    prs, err := svc.List(t.Context())

    // Should return empty pull requests list
    assert.Empty(t, prs)

    // Should return RepositoryNotFoundError
    require.Error(t, err)
    assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}
@@ -37,7 +37,11 @@ func NewGithubService(token, url, owner, repo string, labels []string, optionalH
 		}
 	} else {
 		var err error
-		client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
+		if token == "" {
+			client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
+		} else {
+			client, err = github.NewClient(httpClient).WithAuthToken(token).WithEnterpriseURLs(url, url)
+		}
 		if err != nil {
 			return nil, err
 		}
@@ -60,6 +64,11 @@ func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) {
 	for {
 		pulls, resp, err := g.client.PullRequests.List(ctx, g.owner, g.repo, opts)
 		if err != nil {
+			if resp != nil && resp.StatusCode == http.StatusNotFound {
+				// return a custom error indicating that the repository is not found,
+				// but also returning the empty result since the decision to continue or not in this case is made by the caller
+				return pullRequests, NewRepositoryNotFoundError(err)
+			}
 			return nil, fmt.Errorf("error listing pull requests for %s/%s: %w", g.owner, g.repo, err)
 		}
 		for _, pull := range pulls {
@@ -1,9 +1,12 @@
 package pull_request

 import (
 	"net/http"
 	"net/http/httptest"
 	"testing"

 	"github.com/google/go-github/v69/github"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

@@ -86,3 +89,29 @@ func TestGetGitHubPRLabelNames(t *testing.T) {
 		})
 	}
 }

func TestGitHubListReturnsRepositoryNotFoundError(t *testing.T) {
    mux := http.NewServeMux()
    server := httptest.NewServer(mux)
    defer server.Close()

    path := "/repos/nonexistent/nonexistent/pulls"

    mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
        // Return 404 status to simulate repository not found
        w.WriteHeader(http.StatusNotFound)
        _, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
    })

    svc, err := NewGithubService("", server.URL, "nonexistent", "nonexistent", []string{}, nil)
    require.NoError(t, err)

    prs, err := svc.List(t.Context())

    // Should return empty pull requests list
    assert.Empty(t, prs)

    // Should return RepositoryNotFoundError
    require.Error(t, err)
    assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}
@@ -76,6 +76,11 @@ func (g *GitLabService) List(ctx context.Context) ([]*PullRequest, error) {
 	for {
 		mrs, resp, err := g.client.MergeRequests.ListProjectMergeRequests(g.project, opts, gitlab.WithContext(ctx))
 		if err != nil {
+			if resp != nil && resp.StatusCode == http.StatusNotFound {
+				// return a custom error indicating that the repository is not found,
+				// but also returning the empty result since the decision to continue or not in this case is made by the caller
+				return pullRequests, NewRepositoryNotFoundError(err)
+			}
 			return nil, fmt.Errorf("error listing merge requests for project '%s': %w", g.project, err)
 		}
 		for _, mr := range mrs {
@@ -191,3 +191,29 @@ func TestListWithStateTLS(t *testing.T) {
 		})
 	}
 }

func TestGitLabListReturnsRepositoryNotFoundError(t *testing.T) {
    mux := http.NewServeMux()
    server := httptest.NewServer(mux)
    defer server.Close()

    path := "/api/v4/projects/nonexistent/merge_requests"

    mux.HandleFunc(path, func(w http.ResponseWriter, _ *http.Request) {
        // Return 404 status to simulate repository not found
        w.WriteHeader(http.StatusNotFound)
        _, _ = w.Write([]byte(`{"message": "404 Project Not Found"}`))
    })

    svc, err := NewGitLabService("", server.URL, "nonexistent", []string{}, "", "", false, nil)
    require.NoError(t, err)

    prs, err := svc.List(t.Context())

    // Should return empty pull requests list
    assert.Empty(t, prs)

    // Should return RepositoryNotFoundError
    require.Error(t, err)
    assert.True(t, IsRepositoryNotFoundError(err), "Expected RepositoryNotFoundError but got: %v", err)
}
@@ -30,4 +30,5 @@ type PullRequestService interface {
 type Filter struct {
 	BranchMatch       *regexp.Regexp
 	TargetBranchMatch *regexp.Regexp
+	TitleMatch        *regexp.Regexp
 }

@@ -25,6 +25,12 @@ func compileFilters(filters []argoprojiov1alpha1.PullRequestGeneratorFilter) ([]
 				return nil, fmt.Errorf("error compiling TargetBranchMatch regexp %q: %w", *filter.TargetBranchMatch, err)
 			}
 		}
+		if filter.TitleMatch != nil {
+			outFilter.TitleMatch, err = regexp.Compile(*filter.TitleMatch)
+			if err != nil {
+				return nil, fmt.Errorf("error compiling TitleMatch regexp %q: %w", *filter.TitleMatch, err)
+			}
+		}
 		outFilters = append(outFilters, outFilter)
 	}
 	return outFilters, nil
@@ -37,6 +43,9 @@ func matchFilter(pullRequest *PullRequest, filter *Filter) bool {
 	if filter.TargetBranchMatch != nil && !filter.TargetBranchMatch.MatchString(pullRequest.TargetBranch) {
 		return false
 	}
+	if filter.TitleMatch != nil && !filter.TitleMatch.MatchString(pullRequest.Title) {
+		return false
+	}

 	return true
 }
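As matchFilter above and the tests below show, the fields within a single filter are AND-ed while separate filters are OR-ed, and the new titleMatch field participates in that logic like branchMatch and targetBranchMatch. A small illustrative sketch of that semantics (not the generator code itself):

package main

import (
    "fmt"
    "regexp"
)

type pr struct{ Branch, TargetBranch, Title string }

type filter struct{ BranchMatch, TargetBranchMatch, TitleMatch *regexp.Regexp }

// matches mirrors the AND-within-a-filter rule: every non-nil field must match.
func matches(p pr, f filter) bool {
    if f.BranchMatch != nil && !f.BranchMatch.MatchString(p.Branch) {
        return false
    }
    if f.TargetBranchMatch != nil && !f.TargetBranchMatch.MatchString(p.TargetBranch) {
        return false
    }
    if f.TitleMatch != nil && !f.TitleMatch.MatchString(p.Title) {
        return false
    }
    return true
}

func main() {
    p := pr{Branch: "five", TargetBranch: "branch3", Title: "PR title is different than branch name"}
    f := filter{
        BranchMatch: regexp.MustCompile("five"),
        TitleMatch:  regexp.MustCompile("PR title is different than branch name"),
    }
    fmt.Println(matches(p, f)) // true: both fields of the same filter match
}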
@@ -137,6 +137,110 @@ func TestFilterTargetBranchMatch(t *testing.T) {
 	assert.Equal(t, "two", pullRequests[0].Branch)
 }

func TestFilterTitleMatch(t *testing.T) {
    provider, _ := NewFakeService(
        t.Context(),
        []*PullRequest{
            {
                Number:       1,
                Title:        "PR one - filter",
                Branch:       "one",
                TargetBranch: "master",
                HeadSHA:      "189d92cbf9ff857a39e6feccd32798ca700fb958",
                Author:       "name1",
            },
            {
                Number:       2,
                Title:        "PR two - ignore",
                Branch:       "two",
                TargetBranch: "branch1",
                HeadSHA:      "289d92cbf9ff857a39e6feccd32798ca700fb958",
                Author:       "name2",
            },
            {
                Number:       3,
                Title:        "[filter] PR three",
                Branch:       "three",
                TargetBranch: "branch2",
                HeadSHA:      "389d92cbf9ff857a39e6feccd32798ca700fb958",
                Author:       "name3",
            },
            {
                Number:       4,
                Title:        "[ignore] PR four",
                Branch:       "four",
                TargetBranch: "branch3",
                HeadSHA:      "489d92cbf9ff857a39e6feccd32798ca700fb958",
                Author:       "name4",
            },
        },
        nil,
    )
    filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{
        {
            TitleMatch: strp("\\[filter]"),
        },
    }
    pullRequests, err := ListPullRequests(t.Context(), provider, filters)
    require.NoError(t, err)
    assert.Len(t, pullRequests, 1)
    assert.Equal(t, "three", pullRequests[0].Branch)
}

func TestMultiFilterOrWithTitle(t *testing.T) {
    provider, _ := NewFakeService(
        t.Context(),
        []*PullRequest{
            {
                Number:       1,
                Title:        "PR one - filter",
                Branch:       "one",
                TargetBranch: "master",
                HeadSHA:      "189d92cbf9ff857a39e6feccd32798ca700fb958",
                Author:       "name1",
            },
            {
                Number:       2,
                Title:        "PR two - ignore",
                Branch:       "two",
                TargetBranch: "branch1",
                HeadSHA:      "289d92cbf9ff857a39e6feccd32798ca700fb958",
                Author:       "name2",
            },
            {
                Number:       3,
                Title:        "[filter] PR three",
                Branch:       "three",
                TargetBranch: "branch2",
                HeadSHA:      "389d92cbf9ff857a39e6feccd32798ca700fb958",
                Author:       "name3",
            },
            {
                Number:       4,
                Title:        "[ignore] PR four",
                Branch:       "four",
                TargetBranch: "branch3",
                HeadSHA:      "489d92cbf9ff857a39e6feccd32798ca700fb958",
                Author:       "name4",
            },
        },
        nil,
    )
    filters := []argoprojiov1alpha1.PullRequestGeneratorFilter{
        {
            TitleMatch: strp("\\[filter]"),
        },
        {
            TitleMatch: strp("- filter"),
        },
    }
    pullRequests, err := ListPullRequests(t.Context(), provider, filters)
    require.NoError(t, err)
    assert.Len(t, pullRequests, 2)
    assert.Equal(t, "one", pullRequests[0].Branch)
    assert.Equal(t, "three", pullRequests[1].Branch)
}

func TestMultiFilterOr(t *testing.T) {
    provider, _ := NewFakeService(
        t.Context(),
@@ -192,7 +296,7 @@ func TestMultiFilterOr(t *testing.T) {
 	assert.Equal(t, "four", pullRequests[2].Branch)
 }

-func TestMultiFilterOrWithTargetBranchFilter(t *testing.T) {
+func TestMultiFilterOrWithTargetBranchFilterOrWithTitleFilter(t *testing.T) {
 	provider, _ := NewFakeService(
 		t.Context(),
 		[]*PullRequest{
@@ -228,6 +332,14 @@ func TestMultiFilterOrWithTargetBranchFilter(t *testing.T) {
 				HeadSHA:      "489d92cbf9ff857a39e6feccd32798ca700fb958",
 				Author:       "name4",
 			},
+			{
+				Number:       5,
+				Title:        "PR title is different than branch name",
+				Branch:       "five",
+				TargetBranch: "branch3",
+				HeadSHA:      "489d92cbf9ff857a39e6feccd32798ca700fb958",
+				Author:       "name5",
+			},
 		},
 		nil,
 	)
@@ -240,12 +352,21 @@ func TestMultiFilterOrWithTargetBranchFilter(t *testing.T) {
 			BranchMatch:       strp("r"),
 			TargetBranchMatch: strp("3"),
 		},
+		{
+			TitleMatch: strp("two"),
+		},
+		{
+			BranchMatch: strp("five"),
+			TitleMatch:  strp("PR title is different than branch name"),
+		},
 	}
 	pullRequests, err := ListPullRequests(t.Context(), provider, filters)
 	require.NoError(t, err)
-	assert.Len(t, pullRequests, 2)
+	assert.Len(t, pullRequests, 3)
 	assert.Equal(t, "two", pullRequests[0].Branch)
 	assert.Equal(t, "four", pullRequests[1].Branch)
+	assert.Equal(t, "five", pullRequests[2].Branch)
+	assert.Equal(t, "PR title is different than branch name", pullRequests[2].Title)
 }

 func TestNoFilters(t *testing.T) {
@@ -10,7 +10,7 @@ import (
 	bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
 	log "github.com/sirupsen/logrus"

-	"github.com/argoproj/argo-cd/v3/applicationset/utils"
+	"github.com/argoproj/argo-cd/v3/applicationset/services"
 )

 type BitbucketServerProvider struct {
@@ -49,15 +49,10 @@ func NewBitbucketServerProviderNoAuth(ctx context.Context, url, projectKey strin
 }

 func newBitbucketServerProvider(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, projectKey string, allBranches bool, scmRootCAPath string, insecure bool, caCerts []byte) (*BitbucketServerProvider, error) {
-	bitbucketConfig.BasePath = utils.NormalizeBitbucketBasePath(bitbucketConfig.BasePath)
-	tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)
-	bitbucketConfig.HTTPClient = &http.Client{Transport: &http.Transport{
-		TLSClientConfig: tlsConfig,
-	}}
-	bitbucketClient := bitbucketv1.NewAPIClient(ctx, bitbucketConfig)
+	bbClient := services.SetupBitbucketClient(ctx, bitbucketConfig, scmRootCAPath, insecure, caCerts)

 	return &BitbucketServerProvider{
-		client: bitbucketClient,
+		client: bbClient,
 		projectKey: projectKey,
 		allBranches: allBranches,
 	}, nil
@@ -36,7 +36,11 @@ func NewGithubProvider(organization string, token string, url string, allBranche
 	}
 	} else {
 		var err error
-		client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
+		if token == "" {
+			client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
+		} else {
+			client, err = github.NewClient(httpClient).WithAuthToken(token).WithEnterpriseURLs(url, url)
+		}
 		if err != nil {
 			return nil, err
 		}
applicationset/services/util.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package services

import (
    "context"
    "net/http"

    bitbucketv1 "github.com/gfleury/go-bitbucket-v1"

    "github.com/argoproj/argo-cd/v3/applicationset/utils"
)

// SetupBitbucketClient configures and creates a Bitbucket API client with TLS settings
func SetupBitbucketClient(ctx context.Context, config *bitbucketv1.Configuration, scmRootCAPath string, insecure bool, caCerts []byte) *bitbucketv1.APIClient {
    config.BasePath = utils.NormalizeBitbucketBasePath(config.BasePath)
    tlsConfig := utils.GetTlsConfig(scmRootCAPath, insecure, caCerts)

    transport := http.DefaultTransport.(*http.Transport).Clone()
    transport.TLSClientConfig = tlsConfig
    config.HTTPClient = &http.Client{Transport: transport}

    return bitbucketv1.NewAPIClient(ctx, config)
}
applicationset/services/util_test.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package services

import (
    "context"
    "crypto/tls"
    "net/http"
    "testing"
    "time"

    bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
    "github.com/stretchr/testify/require"
)

func TestSetupBitbucketClient(t *testing.T) {
    ctx := context.Background()
    cfg := &bitbucketv1.Configuration{}

    // Act
    client := SetupBitbucketClient(ctx, cfg, "", false, nil)

    // Assert
    require.NotNil(t, client, "expected client to be created")
    require.NotNil(t, cfg.HTTPClient, "expected HTTPClient to be set")

    // The transport should be a clone of DefaultTransport
    tr, ok := cfg.HTTPClient.Transport.(*http.Transport)
    require.True(t, ok, "expected HTTPClient.Transport to be *http.Transport")
    require.NotSame(t, http.DefaultTransport, tr, "transport should be a clone, not the global DefaultTransport")

    // Ensure TLSClientConfig is set
    require.IsType(t, &tls.Config{}, tr.TLSClientConfig)

    // Defaults from http.DefaultTransport.Clone() should be preserved
    require.Greater(t, tr.IdleConnTimeout, time.Duration(0), "IdleConnTimeout should be non-zero")
    require.Positive(t, tr.MaxIdleConns, "MaxIdleConns should be non-zero")
    require.Greater(t, tr.TLSHandshakeTimeout, time.Duration(0), "TLSHandshakeTimeout should be non-zero")
}
@@ -14,7 +14,7 @@ import (

 var ErrDisallowedSecretAccess = fmt.Errorf("secret must have label %q=%q", common.LabelKeySecretType, common.LabelValueSecretTypeSCMCreds)

-// getSecretRef gets the value of the key for the specified Secret resource.
+// GetSecretRef gets the value of the key for the specified Secret resource.
 func GetSecretRef(ctx context.Context, k8sClient client.Client, ref *argoprojiov1alpha1.SecretRef, namespace string, tokenRefStrictMode bool) (string, error) {
 	if ref == nil {
 		return "", nil
@@ -399,19 +399,19 @@ func addInvalidGeneratorNames(names map[string]bool, applicationSetInfo *argoapp
 	var values map[string]any
 	err := json.Unmarshal([]byte(config), &values)
 	if err != nil {
-		log.Warnf("couldn't unmarshal kubectl.kubernetes.io/last-applied-configuration: %+v", config)
+		log.Warnf("could not unmarshal kubectl.kubernetes.io/last-applied-configuration: %+v", config)
 		return
 	}

 	spec, ok := values["spec"].(map[string]any)
 	if !ok {
-		log.Warn("coundn't get spec from kubectl.kubernetes.io/last-applied-configuration annotation")
+		log.Warn("could not get spec from kubectl.kubernetes.io/last-applied-configuration annotation")
 		return
 	}

 	generators, ok := spec["generators"].([]any)
 	if !ok {
-		log.Warn("coundn't get generators from kubectl.kubernetes.io/last-applied-configuration annotation")
+		log.Warn("could not get generators from kubectl.kubernetes.io/last-applied-configuration annotation")
 		return
 	}

@@ -422,7 +422,7 @@ func addInvalidGeneratorNames(names map[string]bool, applicationSetInfo *argoapp

 		generator, ok := generators[index].(map[string]any)
 		if !ok {
-			log.Warn("coundn't get generator from kubectl.kubernetes.io/last-applied-configuration annotation")
+			log.Warn("could not get generator from kubectl.kubernetes.io/last-applied-configuration annotation")
 			return
 		}

@@ -74,15 +74,15 @@ func NewWebhookHandler(webhookParallelism int, argocdSettingsMgr *argosettings.S
 	if err != nil {
 		return nil, fmt.Errorf("failed to get argocd settings: %w", err)
 	}
-	githubHandler, err := github.New(github.Options.Secret(argocdSettings.WebhookGitHubSecret))
+	githubHandler, err := github.New(github.Options.Secret(argocdSettings.GetWebhookGitHubSecret()))
 	if err != nil {
 		return nil, fmt.Errorf("unable to init GitHub webhook: %w", err)
 	}
-	gitlabHandler, err := gitlab.New(gitlab.Options.Secret(argocdSettings.WebhookGitLabSecret))
+	gitlabHandler, err := gitlab.New(gitlab.Options.Secret(argocdSettings.GetWebhookGitLabSecret()))
 	if err != nil {
 		return nil, fmt.Errorf("unable to init GitLab webhook: %w", err)
 	}
-	azuredevopsHandler, err := azuredevops.New(azuredevops.Options.BasicAuth(argocdSettings.WebhookAzureDevOpsUsername, argocdSettings.WebhookAzureDevOpsPassword))
+	azuredevopsHandler, err := azuredevops.New(azuredevops.Options.BasicAuth(argocdSettings.GetWebhookAzureDevOpsUsername(), argocdSettings.GetWebhookAzureDevOpsPassword()))
 	if err != nil {
 		return nil, fmt.Errorf("unable to init Azure DevOps webhook: %w", err)
 	}
@@ -339,7 +339,7 @@ func genRevisionHasChanged(gen *v1alpha1.GitGenerator, revision string, touchedH

 func gitGeneratorUsesURL(gen *v1alpha1.GitGenerator, webURL string, repoRegexp *regexp.Regexp) bool {
 	if !repoRegexp.MatchString(gen.RepoURL) {
-		log.Debugf("%s does not match %s", gen.RepoURL, repoRegexp.String())
+		log.Warnf("%s does not match %s", gen.RepoURL, repoRegexp.String())
 		return false
 	}

@@ -7,6 +7,7 @@
 # p, <role/user/group>, <resource>, <action>, <object>, <allow/deny>

 p, role:readonly, applications, get, */*, allow
+p, role:readonly, applicationsets, get, */*, allow
 p, role:readonly, certificates, get, *, allow
 p, role:readonly, clusters, get, *, allow
 p, role:readonly, repositories, get, *, allow
assets/swagger.json (generated, 209 changed lines)
@@ -374,6 +374,56 @@
      }
    }
  },
  "/api/v1/applications/{appName}/server-side-diff": {
    "get": {
      "tags": [
        "ApplicationService"
      ],
      "summary": "ServerSideDiff performs server-side diff calculation using dry-run apply",
      "operationId": "ApplicationService_ServerSideDiff",
      "parameters": [
        {
          "type": "string",
          "name": "appName",
          "in": "path",
          "required": true
        },
        {
          "type": "string",
          "name": "appNamespace",
          "in": "query"
        },
        {
          "type": "string",
          "name": "project",
          "in": "query"
        },
        {
          "type": "array",
          "items": {
            "type": "string"
          },
          "collectionFormat": "multi",
          "name": "targetManifests",
          "in": "query"
        }
      ],
      "responses": {
        "200": {
          "description": "A successful response.",
          "schema": {
            "$ref": "#/definitions/applicationApplicationServerSideDiffResponse"
          }
        },
        "default": {
          "description": "An unexpected error response.",
          "schema": {
            "$ref": "#/definitions/runtimeError"
          }
        }
      }
    }
  },
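For orientation, a minimal Go sketch of calling the new server-side-diff endpoint, assuming an Argo CD API server reachable at argocdURL, a valid bearer token, and a hypothetical application named "guestbook"; the path, query parameters, and response fields follow the swagger definitions above.

package main

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
)

func main() {
    argocdURL := "https://argocd.example.com" // assumption: your API server address
    token := "..."                            // assumption: an Argo CD API token

    q := url.Values{}
    q.Set("appNamespace", "argocd")
    q.Set("project", "default")

    req, err := http.NewRequest(http.MethodGet,
        argocdURL+"/api/v1/applications/guestbook/server-side-diff?"+q.Encode(), nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("Authorization", "Bearer "+token)

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    // JSON response with "items" ([]v1alpha1ResourceDiff) and "modified" (boolean).
    fmt.Println(string(body))
}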
"/api/v1/applications/{application.metadata.name}": {
|
||||
"put": {
|
||||
"tags": [
|
||||
@@ -999,6 +1049,11 @@
|
||||
"collectionFormat": "multi",
|
||||
"name": "revisions",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"name": "noCache",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1473,10 +1528,11 @@
      }
    },
    "post": {
+     "description": "Deprecated: use RunResourceActionV2 instead. This version does not support resource action parameters but is\nmaintained for backward compatibility. It will be removed in a future release.",
      "tags": [
        "ApplicationService"
      ],
-     "summary": "RunResourceAction run resource action",
+     "summary": "RunResourceAction runs a resource action",
      "operationId": "ApplicationService_RunResourceAction",
      "parameters": [
        {
@@ -1490,7 +1546,81 @@
          "in": "body",
          "required": true,
          "schema": {
-           "$ref": "#/definitions/applicationResourceActionRunRequest"
+           "type": "string"
          }
        },
        {
          "type": "string",
          "name": "namespace",
          "in": "query"
        },
        {
          "type": "string",
          "name": "resourceName",
          "in": "query"
        },
        {
          "type": "string",
          "name": "version",
          "in": "query"
        },
        {
          "type": "string",
          "name": "group",
          "in": "query"
        },
        {
          "type": "string",
          "name": "kind",
          "in": "query"
        },
        {
          "type": "string",
          "name": "appNamespace",
          "in": "query"
        },
        {
          "type": "string",
          "name": "project",
          "in": "query"
        }
      ],
      "responses": {
        "200": {
          "description": "A successful response.",
          "schema": {
            "$ref": "#/definitions/applicationApplicationResponse"
          }
        },
        "default": {
          "description": "An unexpected error response.",
          "schema": {
            "$ref": "#/definitions/runtimeError"
          }
        }
      }
    }
  },
  "/api/v1/applications/{name}/resource/actions/v2": {
    "post": {
      "tags": [
        "ApplicationService"
      ],
      "summary": "RunResourceActionV2 runs a resource action with parameters",
      "operationId": "ApplicationService_RunResourceActionV2",
      "parameters": [
        {
          "type": "string",
          "name": "name",
          "in": "path",
          "required": true
        },
        {
          "name": "body",
          "in": "body",
          "required": true,
          "schema": {
            "$ref": "#/definitions/applicationResourceActionRunRequestV2"
          }
        }
      ],
@@ -4944,6 +5074,20 @@
      }
    }
  },
  "applicationApplicationServerSideDiffResponse": {
    "type": "object",
    "properties": {
      "items": {
        "type": "array",
        "items": {
          "$ref": "#/definitions/v1alpha1ResourceDiff"
        }
      },
      "modified": {
        "type": "boolean"
      }
    }
  },
  "applicationApplicationSyncRequest": {
    "type": "object",
    "title": "ApplicationSyncRequest is a request to apply the config state to live state",
@@ -5127,7 +5271,7 @@
      }
    }
  },
- "applicationResourceActionRunRequest": {
+ "applicationResourceActionRunRequestV2": {
    "type": "object",
    "properties": {
      "action": {
@@ -6933,7 +7077,7 @@
      },
      "status": {
        "type": "string",
-       "title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)"
+       "title": "Status contains the AppSet's perceived status of the managed Application resource"
      },
      "step": {
        "type": "string",
@@ -7185,6 +7329,10 @@
    "description": "ApplicationSetStrategy configures how generated Applications are updated in sequence.",
    "type": "object",
    "properties": {
+     "deletionOrder": {
+       "type": "string",
+       "title": "DeletionOrder allows specifying the order for deleting generated apps when progressive sync is enabled.\naccepts values \"AllAtOnce\" and \"Reverse\""
+     },
      "rollingSync": {
        "$ref": "#/definitions/v1alpha1ApplicationSetRolloutStrategy"
      },
@@ -8560,12 +8708,20 @@
    "title": "KustomizeOptions are options for kustomize to use when building manifests",
    "properties": {
      "binaryPath": {
        "description": "Deprecated: Use settings.Settings instead. See: settings.Settings.KustomizeVersions.\nIf this field is set, it will be used as the Kustomize binary path.\nOtherwise, Versions is used.",
        "type": "string",
        "title": "BinaryPath holds optional path to kustomize binary"
      },
      "buildOptions": {
        "type": "string",
        "title": "BuildOptions is a string of build parameters to use when calling `kustomize build`"
      },
      "versions": {
        "description": "Versions is a list of Kustomize versions and their corresponding binary paths and build options.",
        "type": "array",
        "items": {
          "$ref": "#/definitions/v1alpha1KustomizeVersion"
        }
      }
    }
  },
@@ -8629,6 +8785,24 @@
      }
    }
  },
  "v1alpha1KustomizeVersion": {
    "type": "object",
    "title": "KustomizeVersion holds information about additional Kustomize versions",
    "properties": {
      "buildOptions": {
        "type": "string",
        "title": "BuildOptions that are specific to a Kustomize version"
      },
      "name": {
        "type": "string",
        "title": "Name holds Kustomize version name"
      },
      "path": {
        "type": "string",
        "title": "Path holds the corresponding binary path"
      }
    }
  },
  "v1alpha1ListGenerator": {
    "type": "object",
    "title": "ListGenerator include items info",
@@ -8950,6 +9124,10 @@
      "bitbucketServer": {
        "$ref": "#/definitions/v1alpha1PullRequestGeneratorBitbucketServer"
      },
+     "continueOnRepoNotFoundError": {
+       "description": "ContinueOnRepoNotFoundError is a flag to continue the ApplicationSet Pull Request generator parameters generation even if the repository is not found.",
+       "type": "boolean"
+     },
      "filters": {
        "description": "Filters for which pull requests should be considered.",
        "type": "array",
@@ -9079,6 +9257,9 @@
      },
      "targetBranchMatch": {
        "type": "string"
      },
+     "titleMatch": {
+       "type": "string"
+     }
    }
  },
@@ -9482,21 +9663,9 @@
    "description": "ResourceActionParam represents a parameter for a resource action.\nIt includes a name, value, type, and an optional default value for the parameter.",
    "type": "object",
    "properties": {
      "default": {
        "description": "Default is the default value of the parameter, if any.",
        "type": "string"
      },
      "name": {
        "description": "Name is the name of the parameter.",
        "type": "string"
      },
      "type": {
        "description": "Type is the type of the parameter (e.g., string, integer).",
        "type": "string"
      },
      "value": {
        "description": "Value is the value of the parameter.",
        "type": "string"
      }
    }
  },
@@ -9796,6 +9965,10 @@
      "description": "Limit is the maximum number of attempts for retrying a failed sync. If set to 0, no retries will be performed.",
      "type": "integer",
      "format": "int64"
    },
+   "refresh": {
+     "type": "boolean",
+     "title": "Refresh indicates if the latest revision should be used on retry instead of the initial one (default: false)"
+   }
  }
  },
@@ -10376,7 +10549,7 @@
      "type": "boolean",
      "title": "AllowEmpty allows apps have zero live resources (default: false)"
    },
-   "enable": {
+   "enabled": {
      "type": "boolean",
      "title": "Enable allows apps to explicitly control automated sync"
    },
@@ -10395,7 +10568,7 @@
    "type": "object",
    "properties": {
      "path": {
-       "description": "Path is a directory path within the git repository where hydrated manifests should be committed to and synced\nfrom. If hydrateTo is set, this is just the path from which hydrated manifests will be synced.",
+       "description": "Path is a directory path within the git repository where hydrated manifests should be committed to and synced\nfrom. The Path should never point to the root of the repo. If hydrateTo is set, this is just the path from which\nhydrated manifests will be synced.\n\n+kubebuilder:validation:Required\n+kubebuilder:validation:MinLength=1\n+kubebuilder:validation:Pattern=`^.{2,}|[^./]$`",
        "type": "string"
      },
      "targetBranch": {
@@ -268,7 +268,7 @@ func NewCommand() *cobra.Command {
 	command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")
 	command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
 	command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
-	command.Flags().IntVar(&maxConcurrentReconciliations, "concurrent-reconciliations", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_RECONCILIATIONS", 10, 1, 100), "Max concurrent reconciliations limit for the controller")
+	command.Flags().IntVar(&maxConcurrentReconciliations, "concurrent-reconciliations", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_RECONCILIATIONS", 10, 1, math.MaxInt), "Max concurrent reconciliations limit for the controller")
 	command.Flags().StringVar(&scmRootCAPath, "scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")
 	command.Flags().StringSliceVar(&globalPreservedAnnotations, "preserved-annotations", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS", []string{}, ","), "Sets global preserved field values for annotations")
 	command.Flags().StringSliceVar(&globalPreservedLabels, "preserved-labels", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS", []string{}, ","), "Sets global preserved field values for labels")

@@ -35,7 +35,7 @@ func NewCommand() *cobra.Command {
 	if nonce == "" {
 		errors.CheckError(fmt.Errorf("%s is not set", askpass.ASKPASS_NONCE_ENV))
 	}
-	conn, err := grpc_util.BlockingDial(ctx, "unix", askpass.SocketPath, nil, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	conn, err := grpc_util.BlockingNewClient(ctx, "unix", askpass.SocketPath, nil, grpc.WithTransportCredentials(insecure.NewCredentials()))
 	errors.CheckError(err)
 	defer utilio.Close(conn)
 	client := askpass.NewAskPassServiceClient(conn)
@@ -1,3 +1,5 @@
|
||||
//go:build !darwin || (cgo && darwin)
|
||||
|
||||
package commands
|
||||
|
||||
import (
|
||||
cmd/argocd-k8s-auth/commands/azure_no_cgo.go (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
//go:build darwin && !cgo
|
||||
|
||||
// Package commands
|
||||
// This file is used when the GOOS is darwin and CGO is not enabled.
|
||||
// It provides a no-op implementation of newAzureCommand to allow goreleaser to build
|
||||
// a darwin binary on a linux machine.
|
||||
package commands
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/workloadidentity"
|
||||
)
|
||||
|
||||
func newAzureCommand() *cobra.Command {
|
||||
command := &cobra.Command{
|
||||
Use: "azure",
|
||||
Run: func(c *cobra.Command, _ []string) {
|
||||
log.Fatalf(workloadidentity.CGOError)
|
||||
},
|
||||
}
|
||||
return command
|
||||
}
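A short sketch of how the two build constraints in this changeset relate (an illustrative summary, not an exhaustive GOOS matrix):

    // azure.go:        //go:build !darwin || (cgo && darwin)  -> built on non-darwin targets, and on darwin when CGO is enabled
    // azure_no_cgo.go: //go:build darwin && !cgo              -> built only on darwin without CGO (e.g. when cross-compiling from linux)
    // For any build configuration exactly one of the constraints holds, so newAzureCommand is always defined exactly once.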
|
||||
@@ -11,16 +11,6 @@ import (
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/env"
|
||||
"github.com/argoproj/argo-cd/v3/util/errors"
|
||||
service "github.com/argoproj/argo-cd/v3/util/notification/argocd"
|
||||
"github.com/argoproj/argo-cd/v3/util/tls"
|
||||
|
||||
notificationscontroller "github.com/argoproj/argo-cd/v3/notification_controller/controller"
|
||||
|
||||
"github.com/argoproj/notifications-engine/pkg/controller"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
@@ -30,27 +20,25 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
notificationscontroller "github.com/argoproj/argo-cd/v3/notification_controller/controller"
|
||||
"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v3/util/cli"
|
||||
"github.com/argoproj/argo-cd/v3/util/env"
|
||||
"github.com/argoproj/argo-cd/v3/util/errors"
|
||||
service "github.com/argoproj/argo-cd/v3/util/notification/argocd"
|
||||
"github.com/argoproj/argo-cd/v3/util/tls"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMetricsPort = 9001
|
||||
)
|
||||
|
||||
func addK8SFlagsToCmd(cmd *cobra.Command) clientcmd.ClientConfig {
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
|
||||
overrides := clientcmd.ConfigOverrides{}
|
||||
kflags := clientcmd.RecommendedConfigOverrideFlags("")
|
||||
cmd.PersistentFlags().StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to a kube config. Only required if out-of-cluster")
|
||||
clientcmd.BindOverrideFlags(&overrides, cmd.PersistentFlags(), kflags)
|
||||
return clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin)
|
||||
}
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
processorsCount int
|
||||
namespace string
|
||||
appLabelSelector string
|
||||
logLevel string
|
||||
logFormat string
|
||||
@@ -175,10 +163,9 @@ func NewCommand() *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
clientConfig = addK8SFlagsToCmd(&command)
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(&command)
|
||||
command.Flags().IntVar(&processorsCount, "processors-count", 1, "Processors count.")
|
||||
command.Flags().StringVar(&appLabelSelector, "app-label-selector", "", "App label selector.")
|
||||
command.Flags().StringVar(&namespace, "namespace", "", "Namespace which controller handles. Current namespace if empty.")
|
||||
command.Flags().StringVar(&logLevel, "loglevel", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringVar(&logFormat, "logformat", env.StringFromEnv("ARGOCD_NOTIFICATIONS_CONTROLLER_LOGFORMAT", "json"), "Set the logging format. One of: json|text")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", defaultMetricsPort, "Metrics port")
|
||||
|
||||
@@ -415,7 +415,6 @@ func reconcileApplications(
|
||||
},
|
||||
settingsMgr,
|
||||
stateCache,
|
||||
projInformer,
|
||||
server,
|
||||
cache,
|
||||
time.Second,
|
||||
@@ -464,7 +463,7 @@ func reconcileApplications(
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions = append(revisions, app.Spec.GetSource().TargetRevision)
|
||||
|
||||
res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false, false)
|
||||
res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error comparing app states: %w", err)
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/redis/go-redis/v9"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@@ -608,7 +609,31 @@ func NewGenClusterConfigCommand(pathOpts *clientcmd.PathOptions) *cobra.Command
|
||||
clientConfig := clientcmd.NewDefaultClientConfig(*cfgAccess, &overrides)
|
||||
conf, err := clientConfig.ClientConfig()
|
||||
errors.CheckError(err)
|
||||
kubeClientset := fake.NewClientset()
|
||||
// Seed a minimal in-memory Argo CD environment so settings retrieval succeeds
|
||||
argoCDCM := &corev1.ConfigMap{
|
||||
TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDConfigMapName,
|
||||
Namespace: ArgoCDNamespace,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/part-of": "argocd",
|
||||
},
|
||||
},
|
||||
}
|
||||
argoCDSecret := &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: common.ArgoCDSecretName,
|
||||
Namespace: ArgoCDNamespace,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/part-of": "argocd",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"server.secretkey": []byte("test"),
|
||||
},
|
||||
}
|
||||
kubeClientset := fake.NewClientset(argoCDCM, argoCDSecret)
|
||||
|
||||
var awsAuthConf *v1alpha1.AWSAuthConfig
|
||||
var execProviderConf *v1alpha1.ExecProviderConfig
|
||||
|
||||
@@ -24,24 +24,24 @@ func TestRun_SignalHandling_GracefulShutdown(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
var runErr error
|
||||
doneCh := make(chan struct{})
|
||||
go func() {
|
||||
err = d.Run(t.Context(), &DashboardConfig{ClientOpts: &apiclient.ClientOptions{}})
|
||||
runErr = d.Run(t.Context(), &DashboardConfig{ClientOpts: &apiclient.ClientOptions{}})
|
||||
close(doneCh)
|
||||
}()
|
||||
|
||||
// Allow some time for the dashboard to register the signal handler
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
proc, err := os.FindProcess(os.Getpid())
|
||||
require.NoErrorf(t, err, "failed to find process: %v", err)
|
||||
err = proc.Signal(syscall.SIGINT)
|
||||
require.NoErrorf(t, err, "failed to send SIGINT: %v", err)
|
||||
proc, procErr := os.FindProcess(os.Getpid())
|
||||
require.NoErrorf(t, procErr, "failed to find process: %v", procErr)
|
||||
sigErr := proc.Signal(syscall.SIGINT)
|
||||
require.NoErrorf(t, sigErr, "failed to send SIGINT: %v", sigErr)
|
||||
|
||||
select {
|
||||
case <-doneCh:
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, runErr)
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("timeout: dashboard.Run did not exit after SIGINT")
|
||||
}
|
||||
|
||||
@@ -39,9 +39,13 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/headless"
|
||||
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/utils"
|
||||
cmdutil "github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
argocommon "github.com/argoproj/argo-cd/v3/common"
|
||||
"github.com/argoproj/argo-cd/v3/controller"
|
||||
argocdclient "github.com/argoproj/argo-cd/v3/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apiclient/application"
|
||||
|
||||
resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
|
||||
|
||||
clusterpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster"
|
||||
projectpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/project"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apiclient/settings"
|
||||
@@ -95,6 +99,7 @@ func NewApplicationCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
command.AddCommand(NewApplicationTerminateOpCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationEditCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationPatchCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationGetResourceCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationPatchResourceCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationDeleteResourceCommand(clientOpts))
|
||||
command.AddCommand(NewApplicationResourceActionsCommand(clientOpts))
|
||||
@@ -348,7 +353,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
command := &cobra.Command{
|
||||
Use: "get APPNAME",
|
||||
Short: "Get application details",
|
||||
Example: templates.Examples(`
|
||||
Example: templates.Examples(`
|
||||
# Get basic details about the application "my-app" in wide format
|
||||
argocd app get my-app -o wide
|
||||
|
||||
@@ -378,7 +383,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
|
||||
# Get application details and display them in a tree format
|
||||
argocd app get my-app --output tree
|
||||
|
||||
|
||||
# Get application details and display them in a detailed tree format
|
||||
argocd app get my-app --output tree=detailed
|
||||
`),
|
||||
@@ -536,7 +541,7 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command := &cobra.Command{
|
||||
Use: "logs APPNAME",
|
||||
Short: "Get logs of application pods",
|
||||
Example: templates.Examples(`
|
||||
Example: templates.Examples(`
|
||||
# Get logs of pods associated with the application "my-app"
|
||||
argocd app logs my-app
|
||||
|
||||
@@ -850,7 +855,7 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
command := &cobra.Command{
|
||||
Use: "set APPNAME",
|
||||
Short: "Set application parameters",
|
||||
Example: templates.Examples(`
|
||||
Example: templates.Examples(`
|
||||
# Set application parameters for the application "my-app"
|
||||
argocd app set my-app --parameter key1=value1 --parameter key2=value2
|
||||
|
||||
@@ -1281,6 +1286,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
revision string
|
||||
localRepoRoot string
|
||||
serverSideGenerate bool
|
||||
serverSideDiff bool
|
||||
localIncludes []string
|
||||
appNamespace string
|
||||
revisions []string
|
||||
@@ -1343,6 +1349,22 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
argoSettings, err := settingsIf.Get(ctx, &settings.SettingsQuery{})
|
||||
errors.CheckError(err)
|
||||
diffOption := &DifferenceOption{}
|
||||
|
||||
hasServerSideDiffAnnotation := resourceutil.HasAnnotationOption(app, argocommon.AnnotationCompareOptions, "ServerSideDiff=true")
|
||||
|
||||
// Use annotation if flag not explicitly set
|
||||
if !c.Flags().Changed("server-side-diff") {
|
||||
serverSideDiff = hasServerSideDiffAnnotation
|
||||
} else if serverSideDiff && !hasServerSideDiffAnnotation {
|
||||
// Flag explicitly set to true, but app annotation is not set
|
||||
fmt.Fprintf(os.Stderr, "Warning: Application does not have ServerSideDiff=true annotation.\n")
|
||||
}
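// For reference, the annotation consulted above would look roughly like this on the Application
// manifest, assuming argocommon.AnnotationCompareOptions resolves to the usual
// argocd.argoproj.io/compare-options key:
//
//   metadata:
//     annotations:
//       argocd.argoproj.io/compare-options: ServerSideDiff=true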
|
||||
|
||||
// Server side diff with local requires server side generate to be set as there will be a mismatch with client-generated manifests.
|
||||
if serverSideDiff && local != "" && !serverSideGenerate {
|
||||
log.Fatal("--server-side-diff with --local requires --server-side-generate.")
|
||||
}
|
||||
|
||||
switch {
|
||||
case app.Spec.HasMultipleSources() && len(revisions) > 0 && len(sourcePositions) > 0:
|
||||
numOfSources := int64(len(app.Spec.GetSources()))
|
||||
@@ -1357,6 +1379,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
AppNamespace: &appNs,
|
||||
Revisions: revisions,
|
||||
SourcePositions: sourcePositions,
|
||||
NoCache: &hardRefresh,
|
||||
}
|
||||
res, err := appIf.GetManifests(ctx, &q)
|
||||
errors.CheckError(err)
|
||||
@@ -1368,6 +1391,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
Name: &appName,
|
||||
Revision: &revision,
|
||||
AppNamespace: &appNs,
|
||||
NoCache: &hardRefresh,
|
||||
}
|
||||
res, err := appIf.GetManifests(ctx, &q)
|
||||
errors.CheckError(err)
|
||||
@@ -1398,7 +1422,8 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
}
|
||||
proj := getProject(ctx, c, clientOpts, app.Spec.Project)
|
||||
foundDiffs := findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
|
||||
|
||||
foundDiffs := findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, app.GetName(), app.GetNamespace())
|
||||
if foundDiffs && exitCode {
|
||||
os.Exit(diffExitCode)
|
||||
}
|
||||
@@ -1407,11 +1432,12 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
|
||||
command.Flags().BoolVar(&hardRefresh, "hard-refresh", false, "Refresh application data as well as target manifests cache")
|
||||
command.Flags().BoolVar(&exitCode, "exit-code", true, "Return non-zero exit code when there is a diff. May also return non-zero exit code if there is an error.")
|
||||
command.Flags().IntVar(&diffExitCode, "diff-exit-code", 1, "Return specified exit code when there is a diff. Typical error code is 20.")
|
||||
command.Flags().IntVar(&diffExitCode, "diff-exit-code", 1, "Return specified exit code when there is a diff. Use a value other than 20 if you want to differentiate a diff from the generic exit code (20) returned by all CLI commands on error.")
|
||||
command.Flags().StringVar(&local, "local", "", "Compare live app to a local manifests")
|
||||
command.Flags().StringVar(&revision, "revision", "", "Compare live app to a particular revision")
|
||||
command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
|
||||
command.Flags().BoolVar(&serverSideGenerate, "server-side-generate", false, "Used with --local, this will send your manifests to the server for diffing")
|
||||
command.Flags().BoolVar(&serverSideDiff, "server-side-diff", false, "Use server-side diff to calculate the diff. This will default to true if the ServerSideDiff annotation is set on the application.")
|
||||
command.Flags().StringArrayVar(&localIncludes, "local-include", []string{"*.yaml", "*.yml", "*.json"}, "Used with --server-side-generate, specify patterns of filenames to send. Matching is based on filename and not path.")
|
||||
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Only render the difference in namespace")
|
||||
command.Flags().StringArrayVar(&revisions, "revisions", []string{}, "Show manifests at specific revisions for source position in source-positions")
|
||||
@@ -1421,6 +1447,101 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
return command
|
||||
}
|
||||
|
||||
// printResourceDiff prints the diff header and calls cli.PrintDiff for a resource
|
||||
func printResourceDiff(group, kind, namespace, name string, live, target *unstructured.Unstructured) {
|
||||
fmt.Printf("\n===== %s/%s %s/%s ======\n", group, kind, namespace, name)
|
||||
_ = cli.PrintDiff(name, live, target)
|
||||
}
|
||||
|
||||
// findAndPrintServerSideDiff performs a server-side diff by making requests to the api server and prints the response
|
||||
func findAndPrintServerSideDiff(ctx context.Context, app *argoappv1.Application, items []objKeyLiveTarget, resources *application.ManagedResourcesResponse, appIf application.ApplicationServiceClient, appName, appNs string) bool {
|
||||
// Process each item for server-side diff
|
||||
foundDiffs := false
|
||||
for _, item := range items {
|
||||
if item.target != nil && hook.IsHook(item.target) || item.live != nil && hook.IsHook(item.live) {
|
||||
continue
|
||||
}
|
||||
|
||||
// For server-side diff, we need to create aligned arrays for this specific resource
|
||||
var liveResource *argoappv1.ResourceDiff
|
||||
var targetManifest string
|
||||
|
||||
if item.live != nil {
|
||||
for _, res := range resources.Items {
|
||||
if res.Group == item.key.Group && res.Kind == item.key.Kind &&
|
||||
res.Namespace == item.key.Namespace && res.Name == item.key.Name {
|
||||
liveResource = res
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if liveResource == nil {
|
||||
// Create empty live resource for creation case
|
||||
liveResource = &argoappv1.ResourceDiff{
|
||||
Group: item.key.Group,
|
||||
Kind: item.key.Kind,
|
||||
Namespace: item.key.Namespace,
|
||||
Name: item.key.Name,
|
||||
LiveState: "",
|
||||
TargetState: "",
|
||||
Modified: true,
|
||||
}
|
||||
}
|
||||
|
||||
if item.target != nil {
|
||||
jsonBytes, err := json.Marshal(item.target)
|
||||
if err != nil {
|
||||
errors.CheckError(fmt.Errorf("error marshaling target object: %w", err))
|
||||
}
|
||||
targetManifest = string(jsonBytes)
|
||||
}
|
||||
|
||||
// Call server-side diff for this individual resource
|
||||
serverSideDiffQuery := &application.ApplicationServerSideDiffQuery{
|
||||
AppName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Project: &app.Spec.Project,
|
||||
LiveResources: []*argoappv1.ResourceDiff{liveResource},
|
||||
TargetManifests: []string{targetManifest},
|
||||
}
|
||||
|
||||
serverSideDiffRes, err := appIf.ServerSideDiff(ctx, serverSideDiffQuery)
|
||||
if err != nil {
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
// Extract diff for this resource
|
||||
for _, resultItem := range serverSideDiffRes.Items {
|
||||
if resultItem.Hook || (!resultItem.Modified && resultItem.TargetState != "" && resultItem.LiveState != "") {
|
||||
continue
|
||||
}
|
||||
|
||||
if resultItem.Modified || resultItem.TargetState == "" || resultItem.LiveState == "" {
|
||||
var live, target *unstructured.Unstructured
|
||||
|
||||
if resultItem.TargetState != "" && resultItem.TargetState != "null" {
|
||||
target = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(resultItem.TargetState), target)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
if resultItem.LiveState != "" && resultItem.LiveState != "null" {
|
||||
live = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(resultItem.LiveState), live)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
// Print resulting diff for this resource
|
||||
foundDiffs = true
|
||||
printResourceDiff(resultItem.Group, resultItem.Kind, resultItem.Namespace, resultItem.Name, live, target)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return foundDiffs
|
||||
}
|
||||
|
||||
// DifferenceOption struct to store diff options
|
||||
type DifferenceOption struct {
|
||||
local string
|
||||
@@ -1432,47 +1553,15 @@ type DifferenceOption struct {
|
||||
revisions []string
|
||||
}
|
||||
|
||||
// findandPrintDiff ... Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false
|
||||
func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts) bool {
|
||||
// findAndPrintDiff ... Prints difference between application current state and state stored in git or locally, returns boolean as true if difference is found else returns false
|
||||
func findAndPrintDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption, ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts, useServerSideDiff bool, appIf application.ApplicationServiceClient, appName, appNs string) bool {
|
||||
var foundDiffs bool
|
||||
liveObjs, err := cmdutil.LiveObjects(resources.Items)
|
||||
|
||||
items, err := prepareObjectsForDiff(ctx, app, proj, resources, argoSettings, diffOptions)
|
||||
errors.CheckError(err)
|
||||
items := make([]objKeyLiveTarget, 0)
|
||||
switch {
|
||||
case diffOptions.local != "":
|
||||
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
case diffOptions.revision != "" || len(diffOptions.revisions) > 0:
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.res.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
errors.CheckError(err)
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
case diffOptions.serversideRes != nil:
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.serversideRes.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
errors.CheckError(err)
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
default:
|
||||
for i := range resources.Items {
|
||||
res := resources.Items[i]
|
||||
live := &unstructured.Unstructured{}
|
||||
err := json.Unmarshal([]byte(res.NormalizedLiveState), &live)
|
||||
errors.CheckError(err)
|
||||
|
||||
target := &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(res.TargetState), &target)
|
||||
errors.CheckError(err)
|
||||
|
||||
items = append(items, objKeyLiveTarget{kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name), live, target})
|
||||
}
|
||||
if useServerSideDiff {
|
||||
return findAndPrintServerSideDiff(ctx, app, items, resources, appIf, appName, appNs)
|
||||
}
|
||||
|
||||
for _, item := range items {
|
||||
@@ -1499,7 +1588,6 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
errors.CheckError(err)
|
||||
|
||||
if diffRes.Modified || item.target == nil || item.live == nil {
|
||||
fmt.Printf("\n===== %s/%s %s/%s ======\n", item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name)
|
||||
var live *unstructured.Unstructured
|
||||
var target *unstructured.Unstructured
|
||||
if item.target != nil && item.live != nil {
|
||||
@@ -1511,10 +1599,8 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
live = item.live
|
||||
target = item.target
|
||||
}
|
||||
if !foundDiffs {
|
||||
foundDiffs = true
|
||||
}
|
||||
_ = cli.PrintDiff(item.key.Name, live, target)
|
||||
foundDiffs = true
|
||||
printResourceDiff(item.key.Group, item.key.Kind, item.key.Namespace, item.key.Name, live, target)
|
||||
}
|
||||
}
|
||||
return foundDiffs
|
||||
@@ -2001,6 +2087,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
applyOutOfSyncOnly bool
|
||||
async bool
|
||||
retryLimit int64
|
||||
retryRefresh bool
|
||||
retryBackoffDuration time.Duration
|
||||
retryBackoffMaxDuration time.Duration
|
||||
retryBackoffFactor int64
|
||||
@@ -2272,9 +2359,10 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
default:
|
||||
log.Fatalf("Unknown sync strategy: '%s'", strategy)
|
||||
}
|
||||
if retryLimit > 0 {
|
||||
if retryLimit != 0 {
|
||||
syncReq.RetryStrategy = &argoappv1.RetryStrategy{
|
||||
Limit: retryLimit,
|
||||
Limit: retryLimit,
|
||||
Refresh: retryRefresh,
|
||||
Backoff: &argoappv1.Backoff{
|
||||
Duration: retryBackoffDuration.String(),
|
||||
MaxDuration: retryBackoffMaxDuration.String(),
|
||||
@@ -2296,7 +2384,11 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
fmt.Printf("====== Previewing differences between live and desired state of application %s ======\n", appQualifiedName)
|
||||
|
||||
proj := getProject(ctx, c, clientOpts, app.Spec.Project)
|
||||
foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
|
||||
|
||||
// Check if application has ServerSideDiff annotation
|
||||
serverSideDiff := resourceutil.HasAnnotationOption(app, argocommon.AnnotationCompareOptions, "ServerSideDiff=true")
|
||||
|
||||
foundDiffs = findAndPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts, serverSideDiff, appIf, appName, appNs)
|
||||
if !foundDiffs {
|
||||
fmt.Printf("====== No Differences found ======\n")
|
||||
// if no differences found, then no need to sync
|
||||
@@ -2339,6 +2431,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().StringArrayVar(&labels, "label", []string{}, "Sync only specific resources with a label. This option may be specified repeatedly.")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
command.Flags().Int64Var(&retryLimit, "retry-limit", 0, "Max number of allowed sync retries")
|
||||
command.Flags().BoolVar(&retryRefresh, "retry-refresh", false, "Indicates if the latest revision should be used on retry instead of the initial one")
|
||||
command.Flags().DurationVar(&retryBackoffDuration, "retry-backoff-duration", argoappv1.DefaultSyncRetryDuration, "Retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)")
|
||||
command.Flags().DurationVar(&retryBackoffMaxDuration, "retry-backoff-max-duration", argoappv1.DefaultSyncRetryMaxDuration, "Max retry backoff duration. Input needs to be a duration (e.g. 2m, 1h)")
|
||||
command.Flags().Int64Var(&retryBackoffFactor, "retry-backoff-factor", argoappv1.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed retry")
|
||||
@@ -3396,7 +3489,7 @@ func NewApplicationRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *
|
||||
Short: "Remove a source from multiple sources application.",
|
||||
Example: ` # Remove the source at position 1 from application's sources. Counting starts at 1.
|
||||
argocd app remove-source myapplication --source-position 1
|
||||
|
||||
|
||||
# Remove the source named "test" from application's sources.
|
||||
argocd app remove-source myapplication --source-name test`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
@@ -3519,3 +3612,60 @@ func NewApplicationConfirmDeletionCommand(clientOpts *argocdclient.ClientOptions
|
||||
command.Flags().StringVarP(&appNamespace, "app-namespace", "N", "", "Namespace of the target application where the source will be appended")
|
||||
return command
|
||||
}
|
||||
|
||||
// prepareObjectsForDiff prepares objects for diffing using the switch statement
|
||||
// to handle different diff options and building the objKeyLiveTarget items
|
||||
func prepareObjectsForDiff(ctx context.Context, app *argoappv1.Application, proj *argoappv1.AppProject, resources *application.ManagedResourcesResponse, argoSettings *settings.Settings, diffOptions *DifferenceOption) ([]objKeyLiveTarget, error) {
|
||||
liveObjs, err := cmdutil.LiveObjects(resources.Items)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items := make([]objKeyLiveTarget, 0)
|
||||
|
||||
switch {
|
||||
case diffOptions.local != "":
|
||||
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
case diffOptions.revision != "" || len(diffOptions.revisions) > 0:
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.res.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
case diffOptions.serversideRes != nil:
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.serversideRes.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
default:
|
||||
for i := range resources.Items {
|
||||
res := resources.Items[i]
|
||||
live := &unstructured.Unstructured{}
|
||||
err := json.Unmarshal([]byte(res.NormalizedLiveState), &live)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
target := &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(res.TargetState), &target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
items = append(items, objKeyLiveTarget{kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name), live, target})
|
||||
}
|
||||
}
|
||||
|
||||
return items, nil
|
||||
}
|
||||
|
||||
@@ -8,23 +8,23 @@ import (
|
||||
"strconv"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/templates"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/headless"
|
||||
"github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
argocdclient "github.com/argoproj/argo-cd/v3/pkg/apiclient"
|
||||
applicationpkg "github.com/argoproj/argo-cd/v3/pkg/apiclient/application"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/argo"
|
||||
"github.com/argoproj/argo-cd/v3/util/errors"
|
||||
"github.com/argoproj/argo-cd/v3/util/grpc"
|
||||
utilio "github.com/argoproj/argo-cd/v3/util/io"
|
||||
"github.com/argoproj/argo-cd/v3/util/templates"
|
||||
)
|
||||
|
||||
type DisplayedAction struct {
|
||||
@@ -192,7 +192,26 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti
|
||||
obj := filteredObjects[i]
|
||||
gvk := obj.GroupVersionKind()
|
||||
objResourceName := obj.GetName()
|
||||
_, err := appIf.RunResourceAction(ctx, &applicationpkg.ResourceActionRunRequest{
|
||||
_, err := appIf.RunResourceActionV2(ctx, &applicationpkg.ResourceActionRunRequestV2{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Namespace: ptr.To(obj.GetNamespace()),
|
||||
ResourceName: ptr.To(objResourceName),
|
||||
Group: ptr.To(gvk.Group),
|
||||
Kind: ptr.To(gvk.Kind),
|
||||
Version: ptr.To(gvk.GroupVersion().Version),
|
||||
Action: ptr.To(actionName),
|
||||
// TODO: add support for parameters
|
||||
})
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
if grpc.UnwrapGRPCStatus(err).Code() != codes.Unimplemented {
|
||||
errors.CheckError(err)
|
||||
}
|
||||
fmt.Println("RunResourceActionV2 is not supported by the server, falling back to RunResourceAction.")
|
||||
//nolint:staticcheck // RunResourceAction is deprecated, but we still need to support it for backward compatibility.
|
||||
_, err = appIf.RunResourceAction(ctx, &applicationpkg.ResourceActionRunRequest{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Namespace: ptr.To(obj.GetNamespace()),
|
||||
|
||||
@@ -2,11 +2,14 @@ package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -117,3 +120,561 @@ func TestPrintResourcesTree(t *testing.T) {
|
||||
|
||||
assert.Equal(t, expectation, output)
|
||||
}
|
||||
|
||||
func TestFilterFieldsFromObject(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
obj unstructured.Unstructured
|
||||
filteredFields []string
|
||||
expectedFields []string
|
||||
unexpectedFields []string
|
||||
}{
|
||||
{
|
||||
name: "filter nested field",
|
||||
obj: unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "vX",
|
||||
"kind": "kind",
|
||||
"metadata": map[string]any{
|
||||
"name": "test",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"testfield": map[string]any{
|
||||
"nestedtest": "test",
|
||||
},
|
||||
"testfield2": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
filteredFields: []string{"spec.testfield.nestedtest"},
|
||||
expectedFields: []string{"spec.testfield.nestedtest"},
|
||||
unexpectedFields: []string{"spec.testfield2"},
|
||||
},
|
||||
{
|
||||
name: "filter multiple fields",
|
||||
obj: unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "vX",
|
||||
"kind": "kind",
|
||||
"metadata": map[string]any{
|
||||
"name": "test",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"testfield": map[string]any{
|
||||
"nestedtest": "test",
|
||||
},
|
||||
"testfield2": "test",
|
||||
"testfield3": "deleteme",
|
||||
},
|
||||
},
|
||||
},
|
||||
filteredFields: []string{"spec.testfield.nestedtest", "spec.testfield3"},
|
||||
expectedFields: []string{"spec.testfield.nestedtest"},
|
||||
unexpectedFields: []string{"spec.testfield2"},
|
||||
},
|
||||
{
|
||||
name: "filter nested list object",
|
||||
obj: unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "vX",
|
||||
"kind": "kind",
|
||||
"metadata": map[string]any{
|
||||
"name": "test",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"testfield": map[string]any{
|
||||
"nestedtest": "test",
|
||||
},
|
||||
"testfield2": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
filteredFields: []string{"spec.testfield.nestedtest"},
|
||||
expectedFields: []string{"spec.testfield.nestedtest"},
|
||||
unexpectedFields: []string{"spec.testfield2"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.obj.SetName("test-object")
|
||||
|
||||
filtered := filterFieldsFromObject(&tt.obj, tt.filteredFields)
|
||||
|
||||
for _, field := range tt.expectedFields {
|
||||
fieldPath := strings.Split(field, ".")
|
||||
_, exists, err := unstructured.NestedFieldCopy(filtered.Object, fieldPath...)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, exists, "Expected field %s to exist", field)
|
||||
}
|
||||
|
||||
for _, field := range tt.unexpectedFields {
|
||||
fieldPath := strings.Split(field, ".")
|
||||
_, exists, err := unstructured.NestedFieldCopy(filtered.Object, fieldPath...)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, exists, "Expected field %s to not exist", field)
|
||||
}
|
||||
|
||||
assert.Equal(t, tt.obj.GetName(), filtered.GetName())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractNestedItem(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
obj map[string]any
|
||||
fields []string
|
||||
depth int
|
||||
expected map[string]any
|
||||
}{
|
||||
{
|
||||
name: "extract simple nested item",
|
||||
obj: map[string]any{
|
||||
"listofitems": []any{
|
||||
map[string]any{
|
||||
"extract": "123",
|
||||
"dontextract": "abc",
|
||||
},
|
||||
map[string]any{
|
||||
"extract": "456",
|
||||
"dontextract": "def",
|
||||
},
|
||||
map[string]any{
|
||||
"extract": "789",
|
||||
"dontextract": "ghi",
|
||||
},
|
||||
},
|
||||
},
|
||||
fields: []string{"listofitems", "extract"},
|
||||
depth: 0,
|
||||
expected: map[string]any{
|
||||
"listofitems": []any{
|
||||
map[string]any{
|
||||
"extract": "123",
|
||||
},
|
||||
map[string]any{
|
||||
"extract": "456",
|
||||
},
|
||||
map[string]any{
|
||||
"extract": "789",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "double nested list of objects",
|
||||
obj: map[string]any{
|
||||
"listofitems": []any{
|
||||
map[string]any{
|
||||
"doublenested": []any{
|
||||
map[string]any{
|
||||
"extract": "123",
|
||||
},
|
||||
},
|
||||
"dontextract": "abc",
|
||||
},
|
||||
map[string]any{
|
||||
"doublenested": []any{
|
||||
map[string]any{
|
||||
"extract": "456",
|
||||
},
|
||||
},
|
||||
"dontextract": "def",
|
||||
},
|
||||
map[string]any{
|
||||
"doublenested": []any{
|
||||
map[string]any{
|
||||
"extract": "789",
|
||||
},
|
||||
},
|
||||
"dontextract": "ghi",
|
||||
},
|
||||
},
|
||||
},
|
||||
fields: []string{"listofitems", "doublenested", "extract"},
|
||||
depth: 0,
|
||||
expected: map[string]any{
|
||||
"listofitems": []any{
|
||||
map[string]any{
|
||||
"doublenested": []any{
|
||||
map[string]any{
|
||||
"extract": "123",
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"doublenested": []any{
|
||||
map[string]any{
|
||||
"extract": "456",
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"doublenested": []any{
|
||||
map[string]any{
|
||||
"extract": "789",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name:     "depth is greater than the number of fields",
|
||||
obj: map[string]any{"test1": "1234567890"},
|
||||
fields: []string{"test1"},
|
||||
depth: 4,
|
||||
expected: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
filteredObj := extractNestedItem(tt.obj, tt.fields, tt.depth)
|
||||
assert.Equal(t, tt.expected, filteredObj, "Did not get the correct filtered obj")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractItemsFromList(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
list []any
|
||||
fields []string
|
||||
expected []any
|
||||
}{
|
||||
{
|
||||
name: "test simple field",
|
||||
list: []any{
|
||||
map[string]any{"extract": "value1", "dontextract": "valueA"},
|
||||
map[string]any{"extract": "value2", "dontextract": "valueB"},
|
||||
map[string]any{"extract": "value3", "dontextract": "valueC"},
|
||||
},
|
||||
fields: []string{"extract"},
|
||||
expected: []any{
|
||||
map[string]any{"extract": "value1"},
|
||||
map[string]any{"extract": "value2"},
|
||||
map[string]any{"extract": "value3"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test simple field with some depth",
|
||||
list: []any{
|
||||
map[string]any{
|
||||
"test1": map[string]any{
|
||||
"test2": map[string]any{
|
||||
"extract": "123",
|
||||
"dontextract": "abc",
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"test1": map[string]any{
|
||||
"test2": map[string]any{
|
||||
"extract": "456",
|
||||
"dontextract": "def",
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"test1": map[string]any{
|
||||
"test2": map[string]any{
|
||||
"extract": "789",
|
||||
"dontextract": "ghi",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fields: []string{"test1", "test2", "extract"},
|
||||
expected: []any{
|
||||
map[string]any{
|
||||
"test1": map[string]any{
|
||||
"test2": map[string]any{
|
||||
"extract": "123",
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"test1": map[string]any{
|
||||
"test2": map[string]any{
|
||||
"extract": "456",
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"test1": map[string]any{
|
||||
"test2": map[string]any{
|
||||
"extract": "789",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test a missing field",
|
||||
list: []any{
|
||||
map[string]any{"test1": "123"},
|
||||
map[string]any{"test1": "456"},
|
||||
map[string]any{"test1": "789"},
|
||||
},
|
||||
fields: []string{"test2"},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "test getting an object",
|
||||
list: []any{
|
||||
map[string]any{
|
||||
"extract": map[string]any{
|
||||
"keyA": "valueA",
|
||||
"keyB": "valueB",
|
||||
"keyC": "valueC",
|
||||
},
|
||||
"dontextract": map[string]any{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
"key3": "value3",
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"extract": map[string]any{
|
||||
"keyD": "valueD",
|
||||
"keyE": "valueE",
|
||||
"keyF": "valueF",
|
||||
},
|
||||
"dontextract": map[string]any{
|
||||
"key4": "value4",
|
||||
"key5": "value5",
|
||||
"key6": "value6",
|
||||
},
|
||||
},
|
||||
},
|
||||
fields: []string{"extract"},
|
||||
expected: []any{
|
||||
map[string]any{
|
||||
"extract": map[string]any{
|
||||
"keyA": "valueA",
|
||||
"keyB": "valueB",
|
||||
"keyC": "valueC",
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"extract": map[string]any{
|
||||
"keyD": "valueD",
|
||||
"keyE": "valueE",
|
||||
"keyF": "valueF",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
extractedList := extractItemsFromList(tt.list, tt.fields)
|
||||
assert.Equal(t, tt.expected, extractedList, "Lists were not equal")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconstructObject(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
extracted []any
|
||||
fields []string
|
||||
depth int
|
||||
expected map[string]any
|
||||
}{
|
||||
{
|
||||
name: "simple single field at depth 0",
|
||||
extracted: []any{"value1", "value2"},
|
||||
fields: []string{"items"},
|
||||
depth: 0,
|
||||
expected: map[string]any{
|
||||
"items": []any{"value1", "value2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "object nested at depth 1",
|
||||
extracted: []any{map[string]any{"key": "value"}},
|
||||
fields: []string{"test1", "test2"},
|
||||
depth: 1,
|
||||
expected: map[string]any{
|
||||
"test1": map[string]any{
|
||||
"test2": []any{map[string]any{"key": "value"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty list of extracted items",
|
||||
extracted: []any{},
|
||||
fields: []string{"test1"},
|
||||
depth: 0,
|
||||
expected: map[string]any{
|
||||
"test1": []any{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name:      "complex object nested at depth 2",
|
||||
extracted: []any{map[string]any{
|
||||
"obj1": map[string]any{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
},
|
||||
"obj2": map[string]any{
|
||||
"keyA": "valueA",
|
||||
"keyB": "valueB",
|
||||
},
|
||||
}},
|
||||
fields: []string{"test1", "test2", "test3"},
|
||||
depth: 2,
|
||||
expected: map[string]any{
|
||||
"test1": map[string]any{
|
||||
"test2": map[string]any{
|
||||
"test3": []any{
|
||||
map[string]any{
|
||||
"obj1": map[string]any{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
},
|
||||
"obj2": map[string]any{
|
||||
"keyA": "valueA",
|
||||
"keyB": "valueB",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
filteredObj := reconstructObject(tt.extracted, tt.fields, tt.depth)
|
||||
assert.Equal(t, tt.expected, filteredObj, "objects were not equal")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrintManifests(t *testing.T) {
|
||||
obj := unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "vX",
|
||||
"kind": "test",
|
||||
"metadata": map[string]any{
|
||||
"name": "unit-test",
|
||||
},
|
||||
"spec": map[string]any{
|
||||
"testfield": "testvalue",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectedYAML := `apiVersion: vX
|
||||
kind: test
|
||||
metadata:
|
||||
name: unit-test
|
||||
spec:
|
||||
testfield: testvalue
|
||||
`
|
||||
|
||||
output, _ := captureOutput(func() error {
|
||||
printManifests(&[]unstructured.Unstructured{obj}, false, true, "yaml")
|
||||
return nil
|
||||
})
|
||||
assert.Equal(t, expectedYAML+"\n", output, "Incorrect yaml output for printManifests")
|
||||
|
||||
output, _ = captureOutput(func() error {
|
||||
printManifests(&[]unstructured.Unstructured{obj, obj}, false, true, "yaml")
|
||||
return nil
|
||||
})
|
||||
assert.Equal(t, expectedYAML+"\n---\n"+expectedYAML+"\n", output, "Incorrect yaml output with multiple objs.")
|
||||
|
||||
expectedJSON := `{
|
||||
"apiVersion": "vX",
|
||||
"kind": "test",
|
||||
"metadata": {
|
||||
"name": "unit-test"
|
||||
},
|
||||
"spec": {
|
||||
"testfield": "testvalue"
|
||||
}
|
||||
}`
|
||||
|
||||
output, _ = captureOutput(func() error {
|
||||
printManifests(&[]unstructured.Unstructured{obj}, false, true, "json")
|
||||
return nil
|
||||
})
|
||||
assert.Equal(t, expectedJSON+"\n", output, "Incorrect json output.")
|
||||
|
||||
output, _ = captureOutput(func() error {
|
||||
printManifests(&[]unstructured.Unstructured{obj, obj}, false, true, "json")
|
||||
return nil
|
||||
})
|
||||
assert.Equal(t, expectedJSON+"\n---\n"+expectedJSON+"\n", output, "Incorrect json output with multiple objs.")
|
||||
|
||||
output, _ = captureOutput(func() error {
|
||||
printManifests(&[]unstructured.Unstructured{obj}, true, true, "wide")
|
||||
return nil
|
||||
})
|
||||
assert.Contains(t, output, "FIELD RESOURCE NAME VALUE", "Missing or incorrect header line for table print with showing names.")
|
||||
assert.Contains(t, output, "apiVersion unit-test vX", "Missing or incorrect row in table related to apiVersion with showing names.")
|
||||
assert.Contains(t, output, "kind unit-test test", "Missing or incorrect line in the table related to kind with showing names.")
|
||||
assert.Contains(t, output, "spec.testfield unit-test testvalue", "Missing or incorrect line in the table related to spec.testfield with showing names.")
|
||||
assert.NotContains(t, output, "metadata.name unit-test testvalue", "Missing or incorrect line in the table related to metadata.name with showing names.")
|
||||
|
||||
output, _ = captureOutput(func() error {
|
||||
printManifests(&[]unstructured.Unstructured{obj}, true, false, "wide")
|
||||
return nil
|
||||
})
|
||||
assert.Contains(t, output, "FIELD VALUE", "Missing or incorrect header line for table print with not showing names.")
|
||||
assert.Contains(t, output, "apiVersion vX", "Missing or incorrect row in table related to apiVersion with not showing names.")
|
||||
assert.Contains(t, output, "kind test", "Missing or incorrect row in the table related to kind with not showing names.")
|
||||
assert.Contains(t, output, "spec.testfield testvalue", "Missing or incorrect row in the table related to spec.testefield with not showing names.")
|
||||
assert.NotContains(t, output, "metadata.name testvalue", "Missing or incorrect row in the tbale related to metadata.name with not showing names.")
|
||||
}
|
||||
|
||||
func TestPrintManifests_FilterNestedListObject_Wide(t *testing.T) {
|
||||
obj := unstructured.Unstructured{
|
||||
Object: map[string]any{
|
||||
"apiVersion": "vX",
|
||||
"kind": "kind",
|
||||
"metadata": map[string]any{
|
||||
"name": "unit-test",
|
||||
},
|
||||
"status": map[string]any{
|
||||
"podIPs": []map[string]any{
|
||||
{
|
||||
"IP": "127.0.0.1",
|
||||
},
|
||||
{
|
||||
"IP": "127.0.0.2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
output, _ := captureOutput(func() error {
|
||||
v, err := json.Marshal(&obj)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var obj2 *unstructured.Unstructured
|
||||
err = json.Unmarshal([]byte(v), &obj2)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
printManifests(&[]unstructured.Unstructured{*obj2}, false, true, "wide")
|
||||
return nil
|
||||
})
|
||||
|
||||
// Verify table header
|
||||
assert.Contains(t, output, "FIELD RESOURCE NAME VALUE", "Missing a line in the table")
|
||||
assert.Contains(t, output, "apiVersion unit-test vX", "Test for apiVersion field failed for wide output")
|
||||
assert.Contains(t, output, "kind unit-test kind", "Test for kind field failed for wide output")
|
||||
assert.Contains(t, output, "status.podIPs[0].IP unit-test 127.0.0.1", "Test for podIP array index 0 field failed for wide output")
|
||||
assert.Contains(t, output, "status.podIPs[1].IP unit-test 127.0.0.2", "Test for podIP array index 1 field failed for wide output")
|
||||
}
|
||||
|
||||
@@ -1,16 +1,22 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/cmd/argocd/commands/utils"
|
||||
"github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
@@ -22,15 +28,273 @@ import (
|
||||
utilio "github.com/argoproj/argo-cd/v3/util/io"
|
||||
)
|
||||
|
||||
// NewApplicationGetResourceCommand returns a new instance of the `app get-resource` command
|
||||
func NewApplicationGetResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
resourceName string
|
||||
kind string
|
||||
project string
|
||||
filteredFields []string
|
||||
showManagedFields bool
|
||||
output string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "get-resource APPNAME",
|
||||
Short: "Get details about the live Kubernetes manifests of a resource in an application. The filter-fields flag can be used to only display fields you want to see.",
|
||||
Example: `
|
||||
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in wide format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod
|
||||
|
||||
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in yaml format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod -o yaml
|
||||
|
||||
# Get a specific resource, Pod my-app-pod, in 'my-app' by name in json format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod -o json
|
||||
|
||||
# Get details about all Pods in the application
|
||||
argocd app get-resource my-app --kind Pod
|
||||
|
||||
# Get a specific resource with managed fields, Pod my-app-pod, in 'my-app' by name in wide format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod --show-managed-fields
|
||||
|
||||
# Get the details of a specific field in a resource in 'my-app' in the wide format
|
||||
argocd app get-resource my-app --kind Pod --filter-fields status.podIP
|
||||
|
||||
# Get the details of multiple specific fields in a specific resource in 'my-app' in the wide format
|
||||
argocd app get-resource my-app --kind Pod --resource-name my-app-pod --filter-fields status.podIP,status.hostIP`,
|
||||
}
|
||||
|
||||
command.Run = func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) != 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
appName, appNs := argo.ParseFromQualifiedName(args[0], "")
|
||||
|
||||
conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie()
|
||||
defer utilio.Close(conn)
|
||||
|
||||
tree, err := appIf.ResourceTree(ctx, &applicationpkg.ResourcesQuery{
|
||||
ApplicationName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
// Get manifests of resources
|
||||
// If resource name is "" find all resources of that kind
|
||||
var resources []unstructured.Unstructured
|
||||
var fetchedStr string
|
||||
for _, r := range tree.Nodes {
|
||||
if (resourceName != "" && r.Name != resourceName) || r.Kind != kind {
|
||||
continue
|
||||
}
|
||||
resource, err := appIf.GetResource(ctx, &applicationpkg.ApplicationResourceRequest{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
Group: &r.Group,
|
||||
Kind: &r.Kind,
|
||||
Namespace: &r.Namespace,
|
||||
Project: &project,
|
||||
ResourceName: &r.Name,
|
||||
Version: &r.Version,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
manifest := resource.GetManifest()
|
||||
|
||||
var obj *unstructured.Unstructured
|
||||
err = json.Unmarshal([]byte(manifest), &obj)
|
||||
errors.CheckError(err)
|
||||
|
||||
if !showManagedFields {
|
||||
unstructured.RemoveNestedField(obj.Object, "metadata", "managedFields")
|
||||
}
|
||||
|
||||
if len(filteredFields) != 0 {
|
||||
obj = filterFieldsFromObject(obj, filteredFields)
|
||||
}
|
||||
|
||||
fetchedStr += obj.GetName() + ", "
|
||||
resources = append(resources, *obj)
|
||||
}
|
||||
printManifests(&resources, len(filteredFields) > 0, resourceName == "", output)
|
||||
|
||||
if fetchedStr != "" {
|
||||
fetchedStr = strings.TrimSuffix(fetchedStr, ", ")
|
||||
}
|
||||
log.Infof("Resources '%s' fetched", fetchedStr)
|
||||
}
|
||||
|
||||
command.Flags().StringVar(&resourceName, "resource-name", "", "Name of the resource. If omitted, details of all resources of the specified kind are output")
|
||||
command.Flags().StringVar(&kind, "kind", "", "Kind of resource [REQUIRED]")
|
||||
err := command.MarkFlagRequired("kind")
|
||||
errors.CheckError(err)
|
||||
command.Flags().StringVar(&project, "project", "", "Project of resource")
|
||||
command.Flags().StringSliceVar(&filteredFields, "filter-fields", nil, "A comma-separated list of fields to display. If not provided, the entire manifest is output")
|
||||
command.Flags().BoolVar(&showManagedFields, "show-managed-fields", false, "Show managed fields in the output manifest")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Format of the output, wide, yaml, or json")
|
||||
return command
|
||||
}
|
||||
|
||||
// filterFieldsFromObject creates a new unstructured object containing only the specified fields from the source object.
func filterFieldsFromObject(obj *unstructured.Unstructured, filteredFields []string) *unstructured.Unstructured {
var filteredObj unstructured.Unstructured
filteredObj.Object = make(map[string]any)

for _, f := range filteredFields {
fields := strings.Split(f, ".")

value, exists, err := unstructured.NestedFieldCopy(obj.Object, fields...)
if exists {
errors.CheckError(err)
err = unstructured.SetNestedField(filteredObj.Object, value, fields...)
errors.CheckError(err)
} else {
// If the field doesn't exist at the top level, assume it is nested inside a list of objects
value := extractNestedItem(obj.Object, fields, 0)
filteredObj.Object = value
}
}
filteredObj.SetName(obj.GetName())
return &filteredObj
}
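// Illustrative sketch (not part of the source; values are hypothetical): given a Pod manifest containing
//   {"status": {"podIP": "10.244.0.12", "hostIP": "10.0.0.1"}}
// and --filter-fields status.podIP, filterFieldsFromObject returns an object holding only
//   {"status": {"podIP": "10.244.0.12"}}
// plus the resource name, so the table and YAML/JSON printers show just the requested fields.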
|
||||
|
||||
// extractNestedItem recursively extracts an item that may be nested inside a list of objects.
func extractNestedItem(obj map[string]any, fields []string, depth int) map[string]any {
if depth >= len(fields) {
return nil
}

value, exists, _ := unstructured.NestedFieldCopy(obj, fields[:depth+1]...)
list, ok := value.([]any)
if !exists || !ok {
return extractNestedItem(obj, fields, depth+1)
}

extractedItems := extractItemsFromList(list, fields[depth+1:])
if len(extractedItems) == 0 {
for _, e := range list {
if o, ok := e.(map[string]any); ok {
result := extractNestedItem(o, fields[depth+1:], 0)
extractedItems = append(extractedItems, result)
}
}
}

filteredObj := reconstructObject(extractedItems, fields, depth)
return filteredObj
}
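// Illustrative sketch (hypothetical field path, not part of the source): for a filter such as
// spec.containers.image, "containers" resolves to a list, so extractNestedItem hands that list to
// extractItemsFromList with the remaining path ("image") and then rebuilds the original nesting
// via reconstructObject.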
|
||||
|
||||
// extractItemsFromList processes a list of objects and extracts specific fields from each item.
func extractItemsFromList(list []any, fields []string) []any {
var extractedObjs []any
for _, e := range list {
extractedObj := make(map[string]any)
if o, ok := e.(map[string]any); ok {
value, exists, _ := unstructured.NestedFieldCopy(o, fields...)
if !exists {
continue
}
err := unstructured.SetNestedField(extractedObj, value, fields...)
errors.CheckError(err)
extractedObjs = append(extractedObjs, extractedObj)
}
}
return extractedObjs
}
|
||||
|
||||
// reconstructObject rebuilds the original object structure by placing extracted items back into their proper nested location.
func reconstructObject(extracted []any, fields []string, depth int) map[string]any {
obj := make(map[string]any)
err := unstructured.SetNestedField(obj, extracted, fields[:depth+1]...)
errors.CheckError(err)
return obj
}
|
||||
|
||||
// printManifests outputs resource manifests in the specified format (wide, JSON, or YAML).
|
||||
func printManifests(objs *[]unstructured.Unstructured, filteredFields bool, showName bool, output string) {
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
if showName {
|
||||
fmt.Fprintf(w, "FIELD\tRESOURCE NAME\tVALUE\n")
|
||||
} else {
|
||||
fmt.Fprintf(w, "FIELD\tVALUE\n")
|
||||
}
|
||||
|
||||
for i, o := range *objs {
|
||||
if output == "json" || output == "yaml" {
|
||||
var formattedManifest []byte
|
||||
var err error
|
||||
if output == "json" {
|
||||
formattedManifest, err = json.MarshalIndent(o.Object, "", " ")
|
||||
} else {
|
||||
formattedManifest, err = yaml.Marshal(o.Object)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
|
||||
fmt.Println(string(formattedManifest))
|
||||
if len(*objs) > 1 && i != len(*objs)-1 {
|
||||
fmt.Println("---")
|
||||
}
|
||||
} else {
|
||||
name := o.GetName()
|
||||
if filteredFields {
|
||||
unstructured.RemoveNestedField(o.Object, "metadata", "name")
|
||||
}
|
||||
|
||||
printManifestAsTable(w, name, showName, o.Object, "")
|
||||
}
|
||||
}
|
||||
|
||||
if output != "json" && output != "yaml" {
|
||||
err := w.Flush()
|
||||
errors.CheckError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// printManifestAsTable recursively prints a manifest object as a tabular view with nested fields flattened.
|
||||
func printManifestAsTable(w *tabwriter.Writer, name string, showName bool, obj map[string]any, parentField string) {
|
||||
for key, value := range obj {
|
||||
field := parentField + key
|
||||
switch v := value.(type) {
|
||||
case map[string]any:
|
||||
printManifestAsTable(w, name, showName, v, field+".")
|
||||
case []any:
|
||||
for i, e := range v {
|
||||
index := "[" + strconv.Itoa(i) + "]"
|
||||
|
||||
if innerObj, ok := e.(map[string]any); ok {
|
||||
printManifestAsTable(w, name, showName, innerObj, field+index+".")
|
||||
} else {
|
||||
if showName {
|
||||
fmt.Fprintf(w, "%v\t%v\t%v\n", field+index, name, e)
|
||||
} else {
|
||||
fmt.Fprintf(w, "%v\t%v\n", field+index, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
if showName {
|
||||
fmt.Fprintf(w, "%v\t%v\t%v\n", field, name, v)
|
||||
} else {
|
||||
fmt.Fprintf(w, "%v\t%v\n", field, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
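// Illustrative wide output when no --resource-name is given (resource and values are hypothetical):
//   FIELD                      RESOURCE NAME   VALUE
//   status.podIP               my-app-pod      10.244.0.12
//   spec.containers[0].image   my-app-pod      nginx:1.27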
|
||||
|
||||
func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var patch string
|
||||
var patchType string
|
||||
var resourceName string
|
||||
var namespace string
|
||||
var kind string
|
||||
var group string
|
||||
var all bool
|
||||
var project string
|
||||
var (
|
||||
patch string
|
||||
patchType string
|
||||
resourceName string
|
||||
namespace string
|
||||
kind string
|
||||
group string
|
||||
all bool
|
||||
project string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "patch-resource APPNAME",
|
||||
Short: "Patch resource in an application",
|
||||
@@ -90,14 +354,16 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
}
|
||||
|
||||
func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var resourceName string
|
||||
var namespace string
|
||||
var kind string
|
||||
var group string
|
||||
var force bool
|
||||
var orphan bool
|
||||
var all bool
|
||||
var project string
|
||||
var (
|
||||
resourceName string
|
||||
namespace string
|
||||
kind string
|
||||
group string
|
||||
force bool
|
||||
orphan bool
|
||||
all bool
|
||||
project string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "delete-resource APPNAME",
|
||||
Short: "Delete resource in an application",
|
||||
@@ -253,13 +519,16 @@ func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.Appli
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = w.Flush()
|
||||
err := w.Flush()
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var orphaned bool
|
||||
var output string
|
||||
var project string
|
||||
var (
|
||||
orphaned bool
|
||||
output string
|
||||
project string
|
||||
)
|
||||
command := &cobra.Command{
|
||||
Use: "resources APPNAME",
|
||||
Short: "List resource of application",
|
||||
|
||||
@@ -2228,10 +2228,15 @@ func (c *fakeAppServiceClient) ListResourceActions(_ context.Context, _ *applica
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// nolint:staticcheck // ResourceActionRunRequest is deprecated, but we still need to implement it to satisfy the server interface.
|
||||
func (c *fakeAppServiceClient) RunResourceAction(_ context.Context, _ *applicationpkg.ResourceActionRunRequest, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *fakeAppServiceClient) RunResourceActionV2(_ context.Context, _ *applicationpkg.ResourceActionRunRequestV2, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *fakeAppServiceClient) DeleteResource(_ context.Context, _ *applicationpkg.ApplicationResourceDeleteRequest, _ ...grpc.CallOption) (*applicationpkg.ApplicationResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -2248,6 +2253,10 @@ func (c *fakeAppServiceClient) ListResourceLinks(_ context.Context, _ *applicati
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *fakeAppServiceClient) ServerSideDiff(_ context.Context, _ *applicationpkg.ApplicationServerSideDiffQuery, _ ...grpc.CallOption) (*applicationpkg.ApplicationServerSideDiffResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type fakeAcdClient struct {
|
||||
simulateTimeout uint
|
||||
}
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/util/cli"
|
||||
)
|
||||
|
||||
// NewBcryptCmd represents the bcrypt command
|
||||
@@ -15,22 +17,25 @@ func NewBcryptCmd() *cobra.Command {
|
||||
Use: "bcrypt",
|
||||
Short: "Generate bcrypt hash for any password",
|
||||
Example: `# Generate bcrypt hash for any password
|
||||
argocd account bcrypt --password YOUR_PASSWORD`,
|
||||
argocd account bcrypt --password YOUR_PASSWORD
|
||||
|
||||
# Prompt for password input
|
||||
argocd account bcrypt
|
||||
|
||||
# Read password from stdin
|
||||
echo -e "password" | argocd account bcrypt`,
|
||||
Run: func(cmd *cobra.Command, _ []string) {
|
||||
password = cli.PromptPassword(password)
|
||||
bytePassword := []byte(password)
|
||||
// Hashing the password
|
||||
hash, err := bcrypt.GenerateFromPassword(bytePassword, bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to genarate bcrypt hash: %v", err)
|
||||
log.Fatalf("Failed to generate bcrypt hash: %v", err)
|
||||
}
|
||||
fmt.Fprint(cmd.OutOrStdout(), string(hash))
|
||||
},
|
||||
}
|
||||
|
||||
bcryptCmd.Flags().StringVar(&password, "password", "", "Password for which bcrypt hash is generated")
|
||||
err := bcryptCmd.MarkFlagRequired("password")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return bcryptCmd
|
||||
}
|
||||
|
||||
@@ -2,9 +2,11 @@ package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
@@ -20,3 +22,27 @@ func TestGeneratePassword(t *testing.T) {
|
||||
err = bcrypt.CompareHashAndPassword(output.Bytes(), []byte("abc"))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGeneratePasswordWithStdin(t *testing.T) {
|
||||
oldStdin := os.Stdin
|
||||
defer func() {
|
||||
os.Stdin = oldStdin
|
||||
}()
|
||||
|
||||
input := bytes.NewBufferString("abc\n")
|
||||
r, w, _ := os.Pipe()
|
||||
_, _ = w.Write(input.Bytes())
|
||||
w.Close()
|
||||
os.Stdin = r
|
||||
|
||||
bcryptCmd := NewBcryptCmd()
|
||||
bcryptCmd.SetArgs([]string{})
|
||||
output := new(bytes.Buffer)
|
||||
bcryptCmd.SetOut(output)
|
||||
|
||||
err := bcryptCmd.Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = bcrypt.CompareHashAndPassword(output.Bytes(), []byte("abc"))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -27,6 +28,10 @@ argocd configure --prompts-enabled=false`,
|
||||
Run: func(_ *cobra.Command, _ []string) {
|
||||
localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
|
||||
errors.CheckError(err)
|
||||
if localCfg == nil {
|
||||
fmt.Println("No local configuration found")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
localCfg.PromptsEnabled = promptsEnabled
|
||||
|
||||
|
||||
@@ -42,6 +42,7 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
|
||||
username string
|
||||
password string
|
||||
sso bool
|
||||
callback string
|
||||
ssoPort int
|
||||
skipTestTLS bool
|
||||
ssoLaunchBrowser bool
|
||||
@@ -138,7 +139,7 @@ argocd login cd.argoproj.io --core`,
|
||||
errors.CheckError(err)
|
||||
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
|
||||
errors.CheckError(err)
|
||||
tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider, ssoLaunchBrowser)
|
||||
tokenString, refreshToken = oauth2Login(ctx, callback, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider, ssoLaunchBrowser)
|
||||
}
|
||||
parser := jwt.NewParser(jwt.WithoutClaimsValidation())
|
||||
claims := jwt.MapClaims{}
|
||||
@@ -185,8 +186,8 @@ argocd login cd.argoproj.io --core`,
|
||||
command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate")
|
||||
command.Flags().BoolVar(&sso, "sso", false, "Perform SSO login")
|
||||
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application")
|
||||
command.Flags().
|
||||
BoolVar(&skipTestTLS, "skip-test-tls", false, "Skip testing whether the server is configured with TLS (this can help when the command hangs for no apparent reason)")
|
||||
command.Flags().StringVar(&callback, "callback", "", "Scheme, Host and Port for the callback URL")
|
||||
command.Flags().BoolVar(&skipTestTLS, "skip-test-tls", false, "Skip testing whether the server is configured with TLS (this can help when the command hangs for no apparent reason)")
|
||||
command.Flags().BoolVar(&ssoLaunchBrowser, "sso-launch-browser", true, "Automatically launch the system default browser when performing SSO login")
|
||||
return command
|
||||
}
|
||||
@@ -205,13 +206,19 @@ func userDisplayName(claims jwt.MapClaims) string {
|
||||
// returns the JWT token and a refresh token (if supported)
|
||||
func oauth2Login(
|
||||
ctx context.Context,
|
||||
callback string,
|
||||
port int,
|
||||
oidcSettings *settingspkg.OIDCConfig,
|
||||
oauth2conf *oauth2.Config,
|
||||
provider *oidc.Provider,
|
||||
ssoLaunchBrowser bool,
|
||||
) (string, string) {
|
||||
oauth2conf.RedirectURL = fmt.Sprintf("http://localhost:%d/auth/callback", port)
|
||||
redirectBase := callback
|
||||
if redirectBase == "" {
|
||||
redirectBase = "http://localhost:" + strconv.Itoa(port)
|
||||
}
|
||||
|
||||
oauth2conf.RedirectURL = redirectBase + "/auth/callback"
|
||||
oidcConf, err := oidcutil.ParseConfig(provider)
|
||||
errors.CheckError(err)
|
||||
log.Debug("OIDC Configuration:")
|
||||
|
||||
@@ -19,9 +19,12 @@ func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
Use: "logout CONTEXT",
|
||||
Short: "Log out from Argo CD",
|
||||
Long: "Log out from Argo CD",
|
||||
Example: `# To log out of argocd
|
||||
$ argocd logout
|
||||
Example: `# Logout from the active Argo CD context
|
||||
# This can be helpful for security reasons or when you want to switch between different Argo CD contexts or accounts.
|
||||
argocd logout CONTEXT
|
||||
|
||||
# Logout from a specific context named 'cd.argoproj.io'
|
||||
argocd logout cd.argoproj.io
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
if len(args) == 0 {
|
||||
|
||||
@@ -605,8 +605,17 @@ ID ISSUED-AT EXPIRES-AT
|
||||
fmt.Printf(printRoleFmtStr, "Description:", role.Description)
|
||||
fmt.Printf("Policies:\n")
|
||||
fmt.Printf("%s\n", proj.ProjectPoliciesString())
|
||||
fmt.Printf("Groups:\n")
|
||||
// if the group exists in the role
|
||||
// range over each group and print it
|
||||
if v1alpha1.RoleGroupExists(role) {
|
||||
for _, group := range role.Groups {
|
||||
fmt.Printf(" - %s\n", group)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("<none>")
|
||||
}
|
||||
fmt.Printf("JWT Tokens:\n")
|
||||
// TODO(jessesuen): print groups
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
fmt.Fprintf(w, "ID\tISSUED-AT\tEXPIRES-AT\n")
|
||||
for _, token := range proj.Status.JWTTokensByRole[roleName].Items {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
password string
|
||||
callback string
|
||||
ssoPort int
|
||||
ssoLaunchBrowser bool
|
||||
)
|
||||
@@ -73,7 +74,7 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
|
||||
errors.CheckError(err)
|
||||
oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
|
||||
errors.CheckError(err)
|
||||
tokenString, refreshToken = oauth2Login(ctx, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider, ssoLaunchBrowser)
|
||||
tokenString, refreshToken = oauth2Login(ctx, callback, ssoPort, acdSet.GetOIDCConfig(), oauth2conf, provider, ssoLaunchBrowser)
|
||||
}
|
||||
|
||||
localCfg.UpsertUser(localconfig.User{
|
||||
@@ -100,6 +101,7 @@ argocd login cd.argoproj.io --core
|
||||
}
|
||||
command.Flags().StringVar(&password, "password", "", "The password of an account to authenticate")
|
||||
command.Flags().IntVar(&ssoPort, "sso-port", DefaultSSOLocalPort, "Port to run local OAuth2 login application")
|
||||
command.Flags().StringVar(&callback, "callback", "", "Host and Port for the callback URL")
|
||||
command.Flags().BoolVar(&ssoLaunchBrowser, "sso-launch-browser", true, "Automatically launch the default browser when performing SSO login")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -270,6 +270,19 @@ func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
command := &cobra.Command{
|
||||
Use: "rm REPO ...",
|
||||
Short: "Remove configured repositories",
|
||||
Example: `
|
||||
# Remove a single repository
|
||||
argocd repo rm https://github.com/yourusername/your-repo.git
|
||||
|
||||
# Remove multiple repositories
|
||||
argocd repo rm https://github.com/yourusername/your-repo.git https://git.example.com/repo2.git
|
||||
|
||||
# Remove repositories for a specific project
|
||||
argocd repo rm https://github.com/yourusername/your-repo.git --project myproject
|
||||
|
||||
# Remove repository using SSH URL
|
||||
argocd repo rm git@github.com:yourusername/your-repo.git
|
||||
`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -330,22 +343,44 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
command := &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List configured repositories",
|
||||
Example: `
|
||||
# List all repositories
|
||||
argocd repo list
|
||||
|
||||
# List repositories in wide format
|
||||
argocd repo list -o wide
|
||||
|
||||
# List repositories in YAML format
|
||||
argocd repo list -o yaml
|
||||
|
||||
# List repositories in JSON format
|
||||
argocd repo list -o json
|
||||
|
||||
# List urls of repositories
|
||||
argocd repo list -o url
|
||||
|
||||
# Force refresh of cached repository connection status
|
||||
argocd repo list --refresh hard
|
||||
`,
|
||||
Run: func(c *cobra.Command, _ []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoClientOrDie()
|
||||
defer utilio.Close(conn)
|
||||
forceRefresh := false
|
||||
|
||||
switch refresh {
|
||||
case "":
|
||||
case "hard":
|
||||
forceRefresh = true
|
||||
default:
|
||||
err := stderrors.New("--refresh must be one of: 'hard'")
|
||||
err := fmt.Errorf("unknown refresh value: %s. Supported values: hard", refresh)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
repos, err := repoIf.ListRepositories(ctx, &repositorypkg.RepoQuery{ForceRefresh: forceRefresh})
|
||||
errors.CheckError(err)
|
||||
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
err := PrintResourceList(repos.Items, output, false)
|
||||
@@ -356,12 +391,12 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
case "wide", "":
|
||||
printRepoTable(repos.Items)
|
||||
default:
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s. Supported formats: yaml|json|url|wide", output))
|
||||
}
|
||||
},
|
||||
}
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
|
||||
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status , must be one of: 'hard'")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. Supported formats: yaml|json|url|wide")
|
||||
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status. Supported values: hard")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -372,9 +407,26 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
refresh string
|
||||
project string
|
||||
)
|
||||
|
||||
// For better readability and easier formatting
|
||||
repoGetExamples := `
|
||||
# Get Git or Helm repository details in wide format (default, '-o wide')
|
||||
argocd repo get https://git.example.com/repos/repo
|
||||
|
||||
# Get repository details in YAML format
|
||||
argocd repo get https://git.example.com/repos/repo -o yaml
|
||||
|
||||
# Get repository details in JSON format
|
||||
argocd repo get https://git.example.com/repos/repo -o json
|
||||
|
||||
# Get repository URL
|
||||
argocd repo get https://git.example.com/repos/repo -o url
|
||||
`
|
||||
|
||||
command := &cobra.Command{
|
||||
Use: "get REPO",
|
||||
Short: "Get a configured repository by URL",
|
||||
Use: "get REPO",
|
||||
Short: "Get a configured repository by URL",
|
||||
Example: repoGetExamples,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
@@ -393,11 +445,12 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
case "hard":
|
||||
forceRefresh = true
|
||||
default:
|
||||
err := stderrors.New("--refresh must be one of: 'hard'")
|
||||
err := fmt.Errorf("unknown refresh value: %s. Supported values: hard", refresh)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
repo, err := repoIf.Get(ctx, &repositorypkg.RepoQuery{Repo: repoURL, ForceRefresh: forceRefresh, AppProject: project})
|
||||
errors.CheckError(err)
|
||||
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
err := PrintResource(repo, output)
|
||||
@@ -408,13 +461,13 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
case "wide", "":
|
||||
printRepoTable(appsv1.Repositories{repo})
|
||||
default:
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s. Supported formats: yaml|json|url|wide", output))
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
command.Flags().StringVar(&project, "project", "", "project of the repository")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
|
||||
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status , must be one of: 'hard'")
|
||||
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status. Supported values: hard")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
reposerver "github.com/argoproj/argo-cd/v3/cmd/argocd-repo-server/commands"
|
||||
apiserver "github.com/argoproj/argo-cd/v3/cmd/argocd-server/commands"
|
||||
cli "github.com/argoproj/argo-cd/v3/cmd/argocd/commands"
|
||||
"github.com/argoproj/argo-cd/v3/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v3/util/log"
|
||||
)
|
||||
|
||||
@@ -74,7 +73,6 @@ func main() {
|
||||
command = cli.NewCommand()
|
||||
isArgocdCLI = true
|
||||
}
|
||||
util.SetAutoMaxProcs(isArgocdCLI)
|
||||
|
||||
if isArgocdCLI {
|
||||
// silence errors and usages since we'll be printing them manually.
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
@@ -92,6 +90,7 @@ type AppOptions struct {
|
||||
retryBackoffDuration time.Duration
|
||||
retryBackoffMaxDuration time.Duration
|
||||
retryBackoffFactor int64
|
||||
retryRefresh bool
|
||||
ref string
|
||||
SourceName string
|
||||
drySourceRepo string
|
||||
@@ -102,19 +101,6 @@ type AppOptions struct {
|
||||
hydrateToBranch string
|
||||
}
|
||||
|
||||
// SetAutoMaxProcs sets the GOMAXPROCS value based on the binary name.
|
||||
// It suppresses logs for CLI binaries and logs the setting for services.
|
||||
func SetAutoMaxProcs(isCLI bool) {
|
||||
if isCLI {
|
||||
_, _ = maxprocs.Set() // Intentionally ignore errors for CLI binaries
|
||||
} else {
|
||||
_, err := maxprocs.Set(maxprocs.Logger(log.Infof))
|
||||
if err != nil {
|
||||
log.Errorf("Error setting GOMAXPROCS: %v", err)
|
||||
}
|
||||
}
|
||||
}
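// Usage sketch (call site as seen above): the combined entrypoint calls util.SetAutoMaxProcs(isArgocdCLI),
// so CLI invocations stay silent while long-running services log the GOMAXPROCS value chosen by automaxprocs.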
|
||||
|
||||
func AddAppFlags(command *cobra.Command, opts *AppOptions) {
|
||||
command.Flags().StringVar(&opts.repoURL, "repo", "", "Repository URL, ignored if a file is set")
|
||||
command.Flags().StringVar(&opts.appPath, "path", "", "Path in repository to the app directory, ignored if a file is set")
|
||||
@@ -183,6 +169,7 @@ func AddAppFlags(command *cobra.Command, opts *AppOptions) {
|
||||
command.Flags().DurationVar(&opts.retryBackoffDuration, "sync-retry-backoff-duration", argoappv1.DefaultSyncRetryDuration, "Sync retry backoff base duration. Input needs to be a duration (e.g. 2m, 1h)")
|
||||
command.Flags().DurationVar(&opts.retryBackoffMaxDuration, "sync-retry-backoff-max-duration", argoappv1.DefaultSyncRetryMaxDuration, "Max sync retry backoff duration. Input needs to be a duration (e.g. 2m, 1h)")
|
||||
command.Flags().Int64Var(&opts.retryBackoffFactor, "sync-retry-backoff-factor", argoappv1.DefaultSyncRetryFactor, "Factor multiplies the base duration after each failed sync retry")
|
||||
command.Flags().BoolVar(&opts.retryRefresh, "sync-retry-refresh", false, "Indicates if the latest revision should be used on retry instead of the initial one")
|
||||
command.Flags().StringVar(&opts.ref, "ref", "", "Ref is reference to another source within sources field")
|
||||
command.Flags().StringVar(&opts.SourceName, "source-name", "", "Name of the source from the list of sources of the app.")
|
||||
}
|
||||
@@ -276,6 +263,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
MaxDuration: appOpts.retryBackoffMaxDuration.String(),
|
||||
Factor: ptr.To(appOpts.retryBackoffFactor),
|
||||
},
|
||||
Refresh: appOpts.retryRefresh,
|
||||
}
|
||||
case appOpts.retryLimit == 0:
|
||||
if spec.SyncPolicy.IsZero() {
|
||||
@@ -286,6 +274,14 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
default:
|
||||
log.Fatalf("Invalid sync-retry-limit [%d]", appOpts.retryLimit)
|
||||
}
|
||||
case "sync-retry-refresh":
|
||||
if spec.SyncPolicy == nil {
|
||||
spec.SyncPolicy = &argoappv1.SyncPolicy{}
|
||||
}
|
||||
if spec.SyncPolicy.Retry == nil {
|
||||
spec.SyncPolicy.Retry = &argoappv1.RetryStrategy{}
|
||||
}
|
||||
spec.SyncPolicy.Retry.Refresh = appOpts.retryRefresh
|
||||
}
|
||||
})
|
||||
if flags.Changed("auto-prune") {
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
@@ -275,6 +274,13 @@ func Test_setAppSpecOptions(t *testing.T) {
|
||||
require.NoError(t, f.SetFlag("sync-retry-limit", "0"))
|
||||
assert.Nil(t, f.spec.SyncPolicy.Retry)
|
||||
})
|
||||
t.Run("RetryRefresh", func(t *testing.T) {
|
||||
require.NoError(t, f.SetFlag("sync-retry-refresh", "true"))
|
||||
assert.True(t, f.spec.SyncPolicy.Retry.Refresh)
|
||||
|
||||
require.NoError(t, f.SetFlag("sync-retry-refresh", "false"))
|
||||
assert.False(t, f.spec.SyncPolicy.Retry.Refresh)
|
||||
})
|
||||
t.Run("Kustomize", func(t *testing.T) {
|
||||
require.NoError(t, f.SetFlag("kustomize-replica", "my-deployment=2"))
|
||||
require.NoError(t, f.SetFlag("kustomize-replica", "my-statefulset=4"))
|
||||
@@ -573,27 +579,3 @@ func TestFilterResources(t *testing.T) {
|
||||
assert.Nil(t, filteredResources)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetAutoMaxProcs(t *testing.T) {
|
||||
t.Run("CLI mode ignores errors", func(t *testing.T) {
|
||||
logBuffer := &bytes.Buffer{}
|
||||
oldLogger := log.Default()
|
||||
log.SetOutput(logBuffer)
|
||||
defer log.SetOutput(oldLogger.Writer())
|
||||
|
||||
SetAutoMaxProcs(true)
|
||||
|
||||
assert.Empty(t, logBuffer.String(), "Expected no log output when isCLI is true")
|
||||
})
|
||||
|
||||
t.Run("Non-CLI mode logs error on failure", func(t *testing.T) {
|
||||
logBuffer := &bytes.Buffer{}
|
||||
oldLogger := log.Default()
|
||||
log.SetOutput(logBuffer)
|
||||
defer log.SetOutput(oldLogger.Writer())
|
||||
|
||||
SetAutoMaxProcs(false)
|
||||
|
||||
assert.NotContains(t, logBuffer.String(), "Error setting GOMAXPROCS", "Unexpected log output detected")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
@@ -44,15 +45,14 @@ func NewConnection(address string) (*grpc.ClientConn, error) {
|
||||
}
|
||||
unaryInterceptors := []grpc.UnaryClientInterceptor{grpc_retry.UnaryClientInterceptor(retryOpts...)}
|
||||
dialOpts := []grpc.DialOption{
|
||||
grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)),
|
||||
grpc.WithStreamInterceptor(grpc_util.RetryOnlyForServerStreamInterceptor(retryOpts...)),
|
||||
grpc.WithChainUnaryInterceptor(unaryInterceptors...),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxGRPCMessageSize), grpc.MaxCallSendMsgSize(MaxGRPCMessageSize)),
|
||||
grpc.WithUnaryInterceptor(grpc_util.OTELUnaryClientInterceptor()),
|
||||
grpc.WithStreamInterceptor(grpc_util.OTELStreamClientInterceptor()),
|
||||
grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
|
||||
}
|
||||
|
||||
dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
conn, err := grpc_util.BlockingDial(context.Background(), "unix", address, nil, dialOpts...)
|
||||
conn, err := grpc_util.BlockingNewClient(context.Background(), "unix", address, nil, dialOpts...)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to connect to config management plugin service with address %s", address)
|
||||
return nil, err
|
||||
|
||||
@@ -49,13 +49,11 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
|
||||
|
||||
serverLog := log.NewEntry(log.StandardLogger())
|
||||
streamInterceptors := []grpc.StreamServerInterceptor{
|
||||
otelgrpc.StreamServerInterceptor(), //nolint:staticcheck // TODO: ignore SA1019 for depreciation: see https://github.com/argoproj/argo-cd/issues/18258
|
||||
logging.StreamServerInterceptor(grpc_util.InterceptorLogger(serverLog)),
|
||||
serverMetrics.StreamServerInterceptor(),
|
||||
recovery.StreamServerInterceptor(recovery.WithRecoveryHandler(grpc_util.LoggerRecoveryHandler(serverLog))),
|
||||
}
|
||||
unaryInterceptors := []grpc.UnaryServerInterceptor{
|
||||
otelgrpc.UnaryServerInterceptor(), //nolint:staticcheck // TODO: ignore SA1019 for depreciation: see https://github.com/argoproj/argo-cd/issues/18258
|
||||
logging.UnaryServerInterceptor(grpc_util.InterceptorLogger(serverLog)),
|
||||
serverMetrics.UnaryServerInterceptor(),
|
||||
recovery.UnaryServerInterceptor(recovery.WithRecoveryHandler(grpc_util.LoggerRecoveryHandler(serverLog))),
|
||||
@@ -71,6 +69,7 @@ func NewServer(initConstants plugin.CMPServerInitConstants) (*ArgoCDCMPServer, e
|
||||
MinTime: common.GetGRPCKeepAliveEnforcementMinimum(),
|
||||
},
|
||||
),
|
||||
grpc.StatsHandler(otelgrpc.NewServerHandler()),
|
||||
}
|
||||
|
||||
return &ArgoCDCMPServer{
|
||||
|
||||
@@ -40,9 +40,7 @@ func NewConnection(address string) (*grpc.ClientConn, error) {
|
||||
var opts []grpc.DialOption
|
||||
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
|
||||
// TODO: switch to grpc.NewClient.
|
||||
//nolint:staticcheck
|
||||
conn, err := grpc.Dial(address, opts...)
|
||||
conn, err := grpc.NewClient(address, opts...)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to connect to commit service with address %s", address)
|
||||
return nil, err
|
||||
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/controller/hydrator"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
|
||||
@@ -31,6 +33,43 @@ func NewService(gitCredsStore git.CredsStore, metricsServer *metrics.Server) *Se
|
||||
}
|
||||
}
|
||||
|
||||
type hydratorMetadataFile struct {
|
||||
RepoURL string `json:"repoURL,omitempty"`
|
||||
DrySHA string `json:"drySha,omitempty"`
|
||||
Commands []string `json:"commands,omitempty"`
|
||||
Author string `json:"author,omitempty"`
|
||||
Date string `json:"date,omitempty"`
|
||||
// Subject is the subject line of the DRY commit message, i.e. `git show --format=%s`.
|
||||
Subject string `json:"subject,omitempty"`
|
||||
// Body is the body of the DRY commit message, excluding the subject line, i.e. `git show --format=%b`.
|
||||
// Known Argocd- trailers with valid values are removed, but all other trailers are kept.
|
||||
Body string `json:"body,omitempty"`
|
||||
References []v1alpha1.RevisionReference `json:"references,omitempty"`
|
||||
}
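// Illustrative hydrator.metadata payload (all values hypothetical), matching the JSON tags above:
//   {"repoURL": "https://git.example.com/org/repo.git", "drySha": "abc1234",
//    "commands": ["kustomize build ."], "author": "Jane Doe <jane@example.com>",
//    "subject": "chore: bump image", "body": "Signed-off-by: Jane Doe <jane@example.com>"}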
|
||||
|
||||
// TODO: make this configurable via ConfigMap.
|
||||
var manifestHydrationReadmeTemplate = `# Manifest Hydration
|
||||
|
||||
To hydrate the manifests in this repository, run the following commands:
|
||||
|
||||
` + "```shell" + `
|
||||
git clone {{ .RepoURL }}
|
||||
# cd into the cloned directory
|
||||
git checkout {{ .DrySHA }}
|
||||
{{ range $command := .Commands -}}
|
||||
{{ $command }}
|
||||
{{ end -}}` + "```" + `
|
||||
{{ if .References -}}
|
||||
|
||||
## References
|
||||
|
||||
{{ range $ref := .References -}}
|
||||
{{ if $ref.Commit -}}
|
||||
* [{{ $ref.Commit.SHA | mustRegexFind "[0-9a-f]+" | trunc 7 }}]({{ $ref.Commit.RepoURL }}): {{ $ref.Commit.Subject }} ({{ $ref.Commit.Author }})
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ end -}}`
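// Rendered README sketch for a hypothetical repo and SHA (not part of the source):
//   # Manifest Hydration
//   To hydrate the manifests in this repository, run the following commands:
//   git clone https://git.example.com/org/repo.git
//   git checkout abc1234
//   kustomize build .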
|
||||
|
||||
// CommitHydratedManifests handles a commit request. It clones the repository, checks out the sync branch, checks out
|
||||
// the target branch, clears the repository contents, writes the manifests to the repository, commits the changes, and
|
||||
// pushes the changes. It returns the hydrated revision SHA and an error if one occurred.
|
||||
@@ -118,10 +157,25 @@ func (s *Service) handleCommitRequest(logCtx *log.Entry, r *apiclient.CommitHydr
|
||||
return out, "", fmt.Errorf("failed to checkout target branch: %w", err)
|
||||
}
|
||||
|
||||
logCtx.Debug("Clearing repo contents")
|
||||
out, err = gitClient.RemoveContents()
|
||||
if err != nil {
|
||||
return out, "", fmt.Errorf("failed to clear repo: %w", err)
|
||||
logCtx.Debug("Clearing and preparing paths")
|
||||
var pathsToClear []string
|
||||
// Iterate over the configured paths and skip application
// paths that reference the root directory.
|
||||
for _, p := range r.Paths {
|
||||
if hydrator.IsRootPath(p.Path) {
|
||||
// skip adding paths that are referencing root directory
|
||||
logCtx.Debugf("Path %s is referencing root directory, ignoring the path", p.Path)
|
||||
continue
|
||||
}
|
||||
pathsToClear = append(pathsToClear, p.Path)
|
||||
}
|
||||
|
||||
if len(pathsToClear) > 0 {
|
||||
logCtx.Debugf("Clearing paths: %v", pathsToClear)
|
||||
out, err := gitClient.RemoveContents(pathsToClear)
|
||||
if err != nil {
|
||||
return out, "", fmt.Errorf("failed to clear paths %v: %w", pathsToClear, err)
|
||||
}
|
||||
}
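// Example (paths are hypothetical): for configured paths [".", "apps/production", "apps/staging"],
// only the two subdirectories are cleared, as exercised by the "mixed paths" test case further below.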
|
||||
|
||||
logCtx.Debug("Writing manifests")
|
||||
@@ -210,39 +264,3 @@ func (s *Service) initGitClient(logCtx *log.Entry, r *apiclient.CommitHydratedMa
|
||||
|
||||
return gitClient, dirPath, cleanupOrLog, nil
|
||||
}
|
||||
|
||||
type hydratorMetadataFile struct {
|
||||
RepoURL string `json:"repoURL,omitempty"`
|
||||
DrySHA string `json:"drySha,omitempty"`
|
||||
Commands []string `json:"commands,omitempty"`
|
||||
Author string `json:"author,omitempty"`
|
||||
Date string `json:"date,omitempty"`
|
||||
// Subject is the subject line of the DRY commit message, i.e. `git show --format=%s`.
|
||||
Subject string `json:"subject,omitempty"`
|
||||
// Body is the body of the DRY commit message, excluding the subject line, i.e. `git show --format=%b`.
|
||||
Body string `json:"body,omitempty"`
|
||||
References []v1alpha1.RevisionReference `json:"references,omitempty"`
|
||||
}
|
||||
|
||||
// TODO: make this configurable via ConfigMap.
|
||||
var manifestHydrationReadmeTemplate = `# Manifest Hydration
|
||||
|
||||
To hydrate the manifests in this repository, run the following commands:
|
||||
|
||||
` + "```shell" + `
|
||||
git clone {{ .RepoURL }}
|
||||
# cd into the cloned directory
|
||||
git checkout {{ .DrySHA }}
|
||||
{{ range $command := .Commands -}}
|
||||
{{ $command }}
|
||||
{{ end -}}` + "```" + `
|
||||
{{ if .References -}}
|
||||
|
||||
## References
|
||||
|
||||
{{ range $ref := .References -}}
|
||||
{{ if $ref.Commit -}}
|
||||
* [{{ $ref.Commit.SHA | mustRegexFind "[0-9a-f]+" | trunc 7 }}]({{ $ref.Commit.RepoURL }}): {{ $ref.Commit.Subject }} ({{ $ref.Commit.Author }})
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ end -}}`
|
||||
|
||||
@@ -99,7 +99,6 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("RemoveContents").Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("it-worked!", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
@@ -109,6 +108,178 @@ func Test_CommitHydratedManifests(t *testing.T) {
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "it-worked!", resp.HydratedSha)
|
||||
})
|
||||
|
||||
t.Run("root path with dot and blank - no directory removal", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
service, mockRepoClientFactory := newServiceWithMocks(t)
|
||||
mockGitClient := gitmocks.NewClient(t)
|
||||
mockGitClient.On("Init").Return(nil).Once()
|
||||
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("root-and-blank-sha", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
|
||||
requestWithRootAndBlank := &apiclient.CommitHydratedManifestsRequest{
|
||||
Repo: &v1alpha1.Repository{
|
||||
Repo: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
Paths: []*apiclient.PathDetails{
|
||||
{
|
||||
Path: ".",
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-dot"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "",
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"test-blank"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithRootAndBlank)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "root-and-blank-sha", resp.HydratedSha)
|
||||
})
|
||||
|
||||
t.Run("subdirectory path - triggers directory removal", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
service, mockRepoClientFactory := newServiceWithMocks(t)
|
||||
mockGitClient := gitmocks.NewClient(t)
|
||||
mockGitClient.On("Init").Return(nil).Once()
|
||||
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("RemoveContents", []string{"apps/staging"}).Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("subdir-path-sha", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
|
||||
requestWithSubdirPath := &apiclient.CommitHydratedManifestsRequest{
|
||||
Repo: &v1alpha1.Repository{
|
||||
Repo: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
Paths: []*apiclient.PathDetails{
|
||||
{
|
||||
Path: "apps/staging", // subdirectory path
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"test-app"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithSubdirPath)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "subdir-path-sha", resp.HydratedSha)
|
||||
})
|
||||
|
||||
t.Run("mixed paths - root and subdirectory", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
service, mockRepoClientFactory := newServiceWithMocks(t)
|
||||
mockGitClient := gitmocks.NewClient(t)
|
||||
mockGitClient.On("Init").Return(nil).Once()
|
||||
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("RemoveContents", []string{"apps/production", "apps/staging"}).Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("mixed-paths-sha", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
|
||||
requestWithMixedPaths := &apiclient.CommitHydratedManifestsRequest{
|
||||
Repo: &v1alpha1.Repository{
|
||||
Repo: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
Paths: []*apiclient.PathDetails{
|
||||
{
|
||||
Path: ".", // root path - should NOT trigger removal
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"global-config"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "apps/production", // subdirectory path - SHOULD trigger removal
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"prod-app"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "apps/staging", // another subdirectory path - SHOULD trigger removal
|
||||
Manifests: []*apiclient.HydratedManifestDetails{
|
||||
{
|
||||
ManifestJSON: `{"apiVersion":"v1","kind":"Deployment","metadata":{"name":"staging-app"}}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithMixedPaths)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "mixed-paths-sha", resp.HydratedSha)
|
||||
})
|
||||
|
||||
t.Run("empty paths array", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
service, mockRepoClientFactory := newServiceWithMocks(t)
|
||||
mockGitClient := gitmocks.NewClient(t)
|
||||
mockGitClient.On("Init").Return(nil).Once()
|
||||
mockGitClient.On("Fetch", mock.Anything).Return(nil).Once()
|
||||
mockGitClient.On("SetAuthor", "Argo CD", "argo-cd@example.com").Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrOrphan", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CheckoutOrNew", "main", "env/test", false).Return("", nil).Once()
|
||||
mockGitClient.On("CommitAndPush", "main", "test commit message").Return("", nil).Once()
|
||||
mockGitClient.On("CommitSHA").Return("it-worked!", nil).Once()
|
||||
mockRepoClientFactory.On("NewClient", mock.Anything, mock.Anything).Return(mockGitClient, nil).Once()
|
||||
|
||||
requestWithEmptyPaths := &apiclient.CommitHydratedManifestsRequest{
|
||||
Repo: &v1alpha1.Repository{
|
||||
Repo: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
TargetBranch: "main",
|
||||
SyncBranch: "env/test",
|
||||
CommitMessage: "test commit message",
|
||||
}
|
||||
|
||||
resp, err := service.CommitHydratedManifests(t.Context(), requestWithEmptyPaths)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
assert.Equal(t, "it-worked!", resp.HydratedSha)
|
||||
})
|
||||
}
|
||||
|
||||
func newServiceWithMocks(t *testing.T) (*Service, *mocks.RepoClientFactory) {
|
||||
|
||||
@@ -2,14 +2,10 @@ package commit
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/sprig/v3"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -17,12 +13,17 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/hydrator"
|
||||
"github.com/argoproj/argo-cd/v3/util/io"
|
||||
)
|
||||
|
||||
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
|
||||
|
||||
const gitAttributesContents = `*/README.md linguist-generated=true
|
||||
*/hydrator.metadata linguist-generated=true`
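// Note: hosts that honor linguist-generated (GitHub, for example) collapse the generated README.md and
// hydrator.metadata files in diff views, keeping hydrated-branch reviews focused on the manifests themselves.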
|
||||
|
||||
func init() {
|
||||
// Avoid allowing the user to learn things about the environment.
|
||||
delete(sprigFuncMap, "env")
|
||||
@@ -33,36 +34,35 @@ func init() {
|
||||
// WriteForPaths writes the manifests, hydrator.metadata, and README.md files for each path in the provided paths. It
|
||||
// also writes a root-level hydrator.metadata file containing the repo URL and dry SHA.
|
||||
func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *appv1.RevisionMetadata, paths []*apiclient.PathDetails) error { //nolint:revive //FIXME(var-naming)
|
||||
author := ""
|
||||
message := ""
|
||||
date := ""
|
||||
var references []appv1.RevisionReference
|
||||
if dryCommitMetadata != nil {
|
||||
author = dryCommitMetadata.Author
|
||||
message = dryCommitMetadata.Message
|
||||
if dryCommitMetadata.Date != nil {
|
||||
date = dryCommitMetadata.Date.Format(time.RFC3339)
|
||||
}
|
||||
references = dryCommitMetadata.References
|
||||
hydratorMetadata, err := hydrator.GetCommitMetadata(repoUrl, drySha, dryCommitMetadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retrieve hydrator metadata: %w", err)
|
||||
}
|
||||
|
||||
subject, body, _ := strings.Cut(message, "\n\n")
|
||||
|
||||
// Write the top-level readme.
|
||||
err := writeMetadata(root, "", hydratorMetadataFile{DrySHA: drySha, RepoURL: repoUrl, Author: author, Subject: subject, Body: body, Date: date, References: references})
|
||||
err = writeMetadata(root, "", hydratorMetadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write top-level hydrator metadata: %w", err)
|
||||
}
|
||||
|
||||
// Write .gitattributes
|
||||
err = writeGitAttributes(root)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write git attributes: %w", err)
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
hydratePath := p.Path
|
||||
if hydratePath == "." {
|
||||
hydratePath = ""
|
||||
}
|
||||
|
||||
err = mkdirAll(root, hydratePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create path: %w", err)
|
||||
// Only create directory if path is not empty (root directory case)
|
||||
if hydratePath != "" {
|
||||
err = root.MkdirAll(hydratePath, 0o755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create path: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Write the manifests
|
||||
@@ -72,7 +72,7 @@ func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *app
|
||||
}
|
||||
|
||||
// Write hydrator.metadata containing information about the hydration process.
|
||||
hydratorMetadata := hydratorMetadataFile{
|
||||
hydratorMetadata := hydrator.HydratorCommitMetadata{
|
||||
Commands: p.Commands,
|
||||
DrySHA: drySha,
|
||||
RepoURL: repoUrl,
|
||||
@@ -92,7 +92,7 @@ func WriteForPaths(root *os.Root, repoUrl, drySha string, dryCommitMetadata *app
|
||||
}
|
||||
|
||||
// writeMetadata writes the metadata to the hydrator.metadata file.
|
||||
func writeMetadata(root *os.Root, dirPath string, metadata hydratorMetadataFile) error {
|
||||
func writeMetadata(root *os.Root, dirPath string, metadata hydrator.HydratorCommitMetadata) error {
|
||||
hydratorMetadataPath := filepath.Join(dirPath, "hydrator.metadata")
|
||||
f, err := root.Create(hydratorMetadataPath)
|
||||
if err != nil {
|
||||
@@ -111,7 +111,7 @@ func writeMetadata(root *os.Root, dirPath string, metadata hydratorMetadataFile)
|
||||
}
|
||||
|
||||
// writeReadme writes the readme to the README.md file.
|
||||
func writeReadme(root *os.Root, dirPath string, metadata hydratorMetadataFile) error {
|
||||
func writeReadme(root *os.Root, dirPath string, metadata hydrator.HydratorCommitMetadata) error {
|
||||
readmeTemplate, err := template.New("readme").Funcs(sprigFuncMap).Parse(manifestHydrationReadmeTemplate)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse readme template: %w", err)
|
||||
@@ -134,6 +134,30 @@ func writeReadme(root *os.Root, dirPath string, metadata hydratorMetadataFile) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeGitAttributes(root *os.Root) error {
|
||||
gitAttributesFile, err := root.Create(".gitattributes")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create git attributes file: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = gitAttributesFile.Close()
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
common.SecurityField: common.SecurityMedium,
|
||||
common.SecurityCWEField: common.SecurityCWEMissingReleaseOfFileDescriptor,
|
||||
}).Errorf("error closing file %q: %v", gitAttributesFile.Name(), err)
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = gitAttributesFile.WriteString(gitAttributesContents)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write git attributes: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeManifests writes the manifests to the manifest.yaml file, truncating the file if it exists and appending the
|
||||
// manifests in the order they are provided.
|
||||
func writeManifests(root *os.Root, dirPath string, manifests []*apiclient.HydratedManifestDetails) error {
|
||||
@@ -175,25 +199,3 @@ func writeManifests(root *os.Root, dirPath string, manifests []*apiclient.Hydrat
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// mkdirAll creates the directory and all its parents if they do not exist. It returns an error if the directory
|
||||
// cannot be created.
|
||||
func mkdirAll(root *os.Root, dirPath string) error {
|
||||
parts := strings.Split(dirPath, string(os.PathSeparator))
|
||||
builtPath := ""
|
||||
for _, part := range parts {
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
builtPath = filepath.Join(builtPath, part)
|
||||
err := root.Mkdir(builtPath, os.ModePerm)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrExist) {
|
||||
log.WithError(err).Warnf("path %s already exists, skipping", dirPath)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("failed to create path: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -19,6 +18,7 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/commitserver/apiclient"
|
||||
appsv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/hydrator"
|
||||
)
|
||||
|
||||
// tempRoot creates a temporary directory and returns an os.Root object for it.
|
||||
@@ -73,9 +73,13 @@ func TestWriteForPaths(t *testing.T) {
|
||||
|
||||
now := metav1.NewTime(time.Now())
|
||||
metadata := &appsv1.RevisionMetadata{
|
||||
Author: "test-author",
|
||||
Date: &now,
|
||||
Message: "test-message",
|
||||
Author: "test-author",
|
||||
Date: &now,
|
||||
Message: `test-message
|
||||
|
||||
Signed-off-by: Test User <test@example.com>
|
||||
Argocd-reference-commit-sha: abc123
|
||||
`,
|
||||
References: []appsv1.RevisionReference{
|
||||
{
|
||||
Commit: &appsv1.CommitMetadata{
|
||||
@@ -97,16 +101,15 @@ func TestWriteForPaths(t *testing.T) {
|
||||
topMetadataBytes, err := os.ReadFile(topMetadataPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedSubject, expectedBody, _ := strings.Cut(metadata.Message, "\n\n")
|
||||
|
||||
var topMetadata hydratorMetadataFile
|
||||
err = json.Unmarshal(topMetadataBytes, &topMetadata)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, repoURL, topMetadata.RepoURL)
|
||||
assert.Equal(t, drySha, topMetadata.DrySHA)
|
||||
assert.Equal(t, metadata.Author, topMetadata.Author)
|
||||
assert.Equal(t, expectedSubject, topMetadata.Subject)
|
||||
assert.Equal(t, expectedBody, topMetadata.Body)
|
||||
assert.Equal(t, "test-message", topMetadata.Subject)
|
||||
// The body should exclude the Argocd- trailers.
|
||||
assert.Equal(t, "Signed-off-by: Test User <test@example.com>\n", topMetadata.Body)
|
||||
assert.Equal(t, metadata.Date.Format(time.RFC3339), topMetadata.Date)
|
||||
assert.Equal(t, metadata.References, topMetadata.References)
|
||||
|
||||
@@ -142,7 +145,7 @@ func TestWriteForPaths(t *testing.T) {
|
||||
func TestWriteMetadata(t *testing.T) {
|
||||
root := tempRoot(t)
|
||||
|
||||
metadata := hydratorMetadataFile{
|
||||
metadata := hydrator.HydratorCommitMetadata{
|
||||
RepoURL: "https://github.com/example/repo",
|
||||
DrySHA: "abc123",
|
||||
}
|
||||
@@ -154,7 +157,7 @@ func TestWriteMetadata(t *testing.T) {
|
||||
metadataBytes, err := os.ReadFile(metadataPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
var readMetadata hydratorMetadataFile
|
||||
var readMetadata hydrator.HydratorCommitMetadata
|
||||
err = json.Unmarshal(metadataBytes, &readMetadata)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, metadata, readMetadata)
|
||||
@@ -169,7 +172,7 @@ func TestWriteReadme(t *testing.T) {
|
||||
hash := sha256.Sum256(randomData)
|
||||
sha := hex.EncodeToString(hash[:])
|
||||
|
||||
metadata := hydratorMetadataFile{
|
||||
metadata := hydrator.HydratorCommitMetadata{
|
||||
RepoURL: "https://github.com/example/repo",
|
||||
DrySHA: "abc123",
|
||||
References: []appsv1.RevisionReference{
|
||||
@@ -221,3 +224,16 @@ func TestWriteManifests(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(manifestBytes), "kind")
|
||||
}
|
||||
|
||||
func TestWriteGitAttributes(t *testing.T) {
|
||||
root := tempRoot(t)
|
||||
|
||||
err := writeGitAttributes(root)
|
||||
require.NoError(t, err)
|
||||
|
||||
gitAttributesPath := filepath.Join(root.Name(), ".gitattributes")
|
||||
gitAttributesBytes, err := os.ReadFile(gitAttributesPath)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/README.md linguist-generated=true")
|
||||
assert.Contains(t, string(gitAttributesBytes), "*/hydrator.metadata linguist-generated=true")
|
||||
}
|
||||
|
||||
@@ -100,6 +100,12 @@ const (
PluginConfigFileName = "plugin.yaml"
)

// consts for podrequests metrics in cache/info
const (
PodRequestsCPU = "cpu"
PodRequestsMEM = "memory"
)

// Argo CD application related constants
const (

@@ -186,6 +192,8 @@ const (
LabelValueSecretTypeRepoCreds = "repo-creds"
// LabelValueSecretTypeRepositoryWrite indicates a secret type of repository credentials for writing
LabelValueSecretTypeRepositoryWrite = "repository-write"
// LabelValueSecretTypeRepoCredsWrite indicates a secret type of repository credentials for writing for templating
LabelValueSecretTypeRepoCredsWrite = "repo-write-creds"
// LabelValueSecretTypeSCMCreds indicates a secret type of SCM credentials
LabelValueSecretTypeSCMCreds = "scm-creds"
common/version_test.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package common

import (
"runtime"
"testing"

"github.com/stretchr/testify/assert"
)

func TestGetVersion(t *testing.T) {
tests := []struct {
name string
inputGitCommit string
inputGitTag string
inputTreeState string
inputVersion string
expected string
}{
{
name: "Official release with tag and clean state",
inputGitCommit: "abcdef123456",
inputGitTag: "v1.2.3",
inputTreeState: "clean",
inputVersion: "1.2.3",
expected: "v1.2.3",
},
{
name: "Dirty state with commit",
inputGitCommit: "deadbeefcafebabe",
inputGitTag: "",
inputTreeState: "dirty",
inputVersion: "2.0.1",
expected: "v2.0.1+deadbee.dirty",
},
{
name: "Clean state with commit, no tag",
inputGitCommit: "cafebabedeadbeef",
inputGitTag: "",
inputTreeState: "clean",
inputVersion: "2.1.0",
expected: "v2.1.0+cafebab",
},
{
name: "Missing commit and tag",
inputGitCommit: "",
inputGitTag: "",
inputTreeState: "clean",
inputVersion: "3.1.0",
expected: "v3.1.0+unknown",
},
{
name: "Short commit",
inputGitCommit: "abc",
inputGitTag: "",
inputTreeState: "clean",
inputVersion: "4.0.0",
expected: "v4.0.0+unknown",
},
}
for _, tt := range tests {
gitCommit = tt.inputGitCommit
gitTag = tt.inputGitTag
gitTreeState = tt.inputTreeState
version = tt.inputVersion

buildDate = "2025-06-26"
kubectlVersion = "v1.30.0"
extraBuildInfo = "test-build"

got := GetVersion()
assert.Equal(t, tt.expected, got.Version)
assert.Equal(t, buildDate, got.BuildDate)
assert.Equal(t, tt.inputGitCommit, got.GitCommit)
assert.Equal(t, tt.inputGitTag, got.GitTag)
assert.Equal(t, tt.inputTreeState, got.GitTreeState)
assert.Equal(t, runtime.Version(), got.GoVersion)
assert.Equal(t, runtime.Compiler, got.Compiler)
assert.Equal(t, runtime.GOOS+"/"+runtime.GOARCH, got.Platform)
assert.Equal(t, kubectlVersion, got.KubectlVersion)
assert.Equal(t, extraBuildInfo, got.ExtraBuildInfo)
}
}
@@ -47,6 +47,7 @@ import (
"github.com/argoproj/argo-cd/v3/common"
statecache "github.com/argoproj/argo-cd/v3/controller/cache"
"github.com/argoproj/argo-cd/v3/controller/hydrator"
hydratortypes "github.com/argoproj/argo-cd/v3/controller/hydrator/types"
"github.com/argoproj/argo-cd/v3/controller/metrics"
"github.com/argoproj/argo-cd/v3/controller/sharding"
"github.com/argoproj/argo-cd/v3/pkg/apis/application"
@@ -115,7 +116,7 @@ type ApplicationController struct {
appOperationQueue workqueue.TypedRateLimitingInterface[string]
projectRefreshQueue workqueue.TypedRateLimitingInterface[string]
appHydrateQueue workqueue.TypedRateLimitingInterface[string]
hydrationQueue workqueue.TypedRateLimitingInterface[hydrator.HydrationQueueKey]
hydrationQueue workqueue.TypedRateLimitingInterface[hydratortypes.HydrationQueueKey]
appInformer cache.SharedIndexInformer
appLister applisters.ApplicationLister
projInformer cache.SharedIndexInformer
@@ -125,7 +126,7 @@ type ApplicationController struct {
statusHardRefreshTimeout time.Duration
statusRefreshJitter time.Duration
selfHealTimeout time.Duration
selfHealBackOff *wait.Backoff
selfHealBackoff *wait.Backoff
selfHealBackoffCooldown time.Duration
syncTimeout time.Duration
db db.ArgoDB
@@ -198,7 +199,7 @@ func NewApplicationController(
projectRefreshQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "project_reconciliation_queue"}),
appComparisonTypeRefreshQueue: workqueue.NewTypedRateLimitingQueue(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig)),
appHydrateQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[string](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[string]{Name: "app_hydration_queue"}),
hydrationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[hydrator.HydrationQueueKey](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[hydrator.HydrationQueueKey]{Name: "manifest_hydration_queue"}),
hydrationQueue: workqueue.NewTypedRateLimitingQueueWithConfig(ratelimiter.NewCustomAppControllerRateLimiter[hydratortypes.HydrationQueueKey](rateLimiterConfig), workqueue.TypedRateLimitingQueueConfig[hydratortypes.HydrationQueueKey]{Name: "manifest_hydration_queue"}),
db: db,
statusRefreshTimeout: appResyncPeriod,
statusHardRefreshTimeout: appHardResyncPeriod,
@@ -208,7 +209,7 @@ func NewApplicationController(
auditLogger: argo.NewAuditLogger(kubeClientset, common.ApplicationController, enableK8sEvent),
settingsMgr: settingsMgr,
selfHealTimeout: selfHealTimeout,
selfHealBackOff: selfHealBackoff,
selfHealBackoff: selfHealBackoff,
selfHealBackoffCooldown: selfHealBackoffCooldown,
syncTimeout: syncTimeout,
clusterSharding: clusterSharding,
@@ -328,7 +329,7 @@ func NewApplicationController(
}
}
stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterSharding, argo.NewResourceTracking())
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.onKubectlRun, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.onKubectlRun, ctrl.settingsMgr, stateCache, ctrl.metricsServer, argoCache, ctrl.statusRefreshTimeout, argo.NewResourceTracking(), persistResourceHealth, repoErrorGracePeriod, serverSideDiff, ignoreNormalizerOpts)
ctrl.appInformer = appInformer
ctrl.appLister = appLister
ctrl.projInformer = projInformer
@@ -602,6 +603,9 @@ func (ctrl *ApplicationController) getResourceTree(destCluster *appv1.Cluster, a
Group: managedResource.Group,
Namespace: managedResource.Namespace,
},
Health: &appv1.HealthStatus{
Status: health.HealthStatusMissing,
},
})
} else {
managedResourcesKeys = append(managedResourcesKeys, kube.GetResourceKey(live))
@@ -1202,7 +1206,7 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
if err != nil {
logCtx.Warnf("Unable to get destination cluster: %v", err)
app.UnSetCascadedDeletion()
app.UnSetPostDeleteFinalizer()
app.UnSetPostDeleteFinalizerAll()
if err := ctrl.updateFinalizers(app); err != nil {
return err
}
@@ -1391,42 +1395,55 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
logCtx.Debug("Finished processing requested app operation")
}()
terminating := false
terminatingCause := ""
if isOperationInProgress(app) {
state = app.Status.OperationState.DeepCopy()
terminating = state.Phase == synccommon.OperationTerminating
// Failed operation with retry strategy might have be in-progress and has completion time
switch {
case state.FinishedAt != nil && !terminating:
case state.Phase == synccommon.OperationTerminating:
logCtx.Infof("Resuming in-progress operation. phase: %s, message: %s", state.Phase, state.Message)
case ctrl.syncTimeout != time.Duration(0) && time.Now().After(state.StartedAt.Add(ctrl.syncTimeout)):
state.Phase = synccommon.OperationTerminating
state.Message = "operation is terminating due to timeout"
terminatingCause = "controller sync timeout"
ctrl.setOperationState(app, state)
logCtx.Infof("Terminating in-progress operation due to timeout. Started at: %v, timeout: %v", state.StartedAt, ctrl.syncTimeout)
case state.Phase == synccommon.OperationRunning && state.FinishedAt != nil:
// Failed operation with retry strategy might be in-progress and has completion time
retryAt, err := app.Status.OperationState.Operation.Retry.NextRetryAt(state.FinishedAt.Time, state.RetryCount)
if err != nil {
state.Phase = synccommon.OperationFailed
state.Phase = synccommon.OperationError
state.Message = err.Error()
ctrl.setOperationState(app, state)
return
}
retryAfter := time.Until(retryAt)

if retryAfter > 0 {
logCtx.Infof("Skipping retrying in-progress operation. Attempting again at: %s", retryAt.Format(time.RFC3339))
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
return
}
// retrying operation. remove previous failure time in app since it is used as a trigger
// that previous failed and operation should be retried
state.FinishedAt = nil
ctrl.setOperationState(app, state)

// Remove the desired revisions if the sync failed and we are retrying. The latest revision from the source will be used.
extraMsg := ""
if state.Operation.Retry.Refresh {
extraMsg += " with latest revisions"
state.Operation.Sync.Revision = ""
state.Operation.Sync.Revisions = nil
}

// Get rid of sync results and null out previous operation completion time
// This will start the retry attempt
state.Message = fmt.Sprintf("Retrying operation%s. Attempt #%d", extraMsg, state.RetryCount)
state.FinishedAt = nil
state.SyncResult = nil
case ctrl.syncTimeout != time.Duration(0) && time.Now().After(state.StartedAt.Add(ctrl.syncTimeout)) && !terminating:
state.Phase = synccommon.OperationTerminating
state.Message = "operation is terminating due to timeout"
ctrl.setOperationState(app, state)
logCtx.Infof("Terminating in-progress operation due to timeout. Started at: %v, timeout: %v", state.StartedAt, ctrl.syncTimeout)
logCtx.Infof("Retrying operation%s. Attempt #%d", extraMsg, state.RetryCount)
default:
logCtx.Infof("Resuming in-progress operation. phase: %s, message: %s", state.Phase, state.Message)
}
} else {
state = &appv1.OperationState{Phase: synccommon.OperationRunning, Operation: *app.Operation, StartedAt: metav1.Now()}
state = NewOperationState(*app.Operation)
ctrl.setOperationState(app, state)
if ctrl.syncTimeout != time.Duration(0) {
// Schedule a check during which the timeout would be checked.
@@ -1436,22 +1453,16 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
}
ts.AddCheckpoint("initial_operation_stage_ms")

// Call GetDestinationCluster to validate the destination cluster.
if _, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, ctrl.db); err != nil {
state.Phase = synccommon.OperationFailed
state.Message = err.Error()
terminating := state.Phase == synccommon.OperationTerminating
project, err := ctrl.getAppProj(app)
if err == nil {
// Start or resume the sync
ctrl.appStateManager.SyncAppState(app, project, state)
} else {
ctrl.appStateManager.SyncAppState(app, state)
}
ts.AddCheckpoint("validate_and_sync_app_state_ms")

// Check whether application is allowed to use project
_, err := ctrl.getAppProj(app)
ts.AddCheckpoint("get_app_proj_ms")
if err != nil {
state.Phase = synccommon.OperationError
state.Message = err.Error()
state.Message = fmt.Sprintf("Failed to load application project: %v", err)
}
ts.AddCheckpoint("sync_app_state_ms")

switch state.Phase {
case synccommon.OperationRunning:
@@ -1459,12 +1470,6 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
// to clobber the Terminated state with Running. Get the latest app state to check for this.
freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, metav1.GetOptions{})
if err == nil {
// App may have lost permissions to use the project meanwhile.
_, err = ctrl.getAppProj(freshApp)
if err != nil {
state.Phase = synccommon.OperationFailed
state.Message = fmt.Sprintf("operation not allowed: %v", err)
}
if freshApp.Status.OperationState != nil && freshApp.Status.OperationState.Phase == synccommon.OperationTerminating {
state.Phase = synccommon.OperationTerminating
state.Message = "operation is terminating"
@@ -1476,17 +1481,24 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
case synccommon.OperationFailed, synccommon.OperationError:
if !terminating && (state.RetryCount < state.Operation.Retry.Limit || state.Operation.Retry.Limit < 0) {
now := metav1.Now()
state.FinishedAt = &now
if retryAt, err := state.Operation.Retry.NextRetryAt(now.Time, state.RetryCount); err != nil {
state.Phase = synccommon.OperationFailed
state.Phase = synccommon.OperationError
state.Message = fmt.Sprintf("%s (failed to retry: %v)", state.Message, err)
} else {
// Set FinishedAt explicitly on a Running phase. This is a unique condition that will allow this
// function to perform a retry the next time the operation is processed.
state.Phase = synccommon.OperationRunning
state.FinishedAt = &now
state.RetryCount++
state.Message = fmt.Sprintf("%s. Retrying attempt #%d at %s.", state.Message, state.RetryCount, retryAt.Format(time.Kitchen))
}
} else if state.RetryCount > 0 {
state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)
} else {
if terminating && terminatingCause != "" {
state.Message = fmt.Sprintf("%s, triggered by %s", state.Message, terminatingCause)
}
if state.RetryCount > 0 {
state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)
}
}
}
@@ -1760,7 +1772,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
sources = append(sources, app.Spec.GetSource())
}

compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources, refreshType == appv1.RefreshTypeHard, comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources, false)
compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources, refreshType == appv1.RefreshTypeHard, comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources)

ts.AddCheckpoint("compare_app_state_ms")

@@ -1786,7 +1798,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo

canSync, _ := project.Spec.SyncWindows.Matches(app).CanSync(false)
if canSync {
syncErrCond, opDuration := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionUpdated)
syncErrCond, opDuration := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources, compareResult.revisionsMayHaveChanges)
setOpDuration = opDuration
if syncErrCond != nil {
app.Status.SetConditions(
@@ -2081,7 +2093,7 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
}

// autoSync will initiate a sync operation for an application configured with automated sync
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus, revisionUpdated bool) (*appv1.ApplicationCondition, time.Duration) {
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus, shouldCompareRevisions bool) (*appv1.ApplicationCondition, time.Duration) {
logCtx := log.WithFields(applog.GetAppLogFields(app))
ts := stats.NewTimingStats()
defer func() {
@@ -2125,65 +2137,70 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
}
}

selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
// Multi-Source Apps with selfHeal disabled should not trigger an autosync if
// the last sync revision and the new sync revision is the same.
if app.Spec.HasMultipleSources() && !selfHeal && reflect.DeepEqual(app.Status.Sync.Revisions, syncStatus.Revisions) {
logCtx.Infof("Skipping auto-sync: selfHeal disabled and sync caused by object update")
return nil, 0
source := ptr.To(app.Spec.GetSource())
desiredRevisions := []string{syncStatus.Revision}
if app.Spec.HasMultipleSources() {
source = nil
desiredRevisions = syncStatus.Revisions
}

desiredCommitSHA := syncStatus.Revision
desiredCommitSHAsMS := syncStatus.Revisions
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources(), revisionUpdated)
ts.AddCheckpoint("already_attempted_sync_ms")
op := appv1.Operation{
Sync: &appv1.SyncOperation{
Revision: desiredCommitSHA,
Source: source,
Revision: syncStatus.Revision,
Prune: app.Spec.SyncPolicy.Automated.Prune,
SyncOptions: app.Spec.SyncPolicy.SyncOptions,
Revisions: desiredCommitSHAsMS,
Sources: app.Spec.Sources,
Revisions: syncStatus.Revisions,
},
InitiatedBy: appv1.OperationInitiator{Automated: true},
Retry: appv1.RetryStrategy{Limit: 5},
}

if app.Spec.SyncPolicy.Retry != nil {
op.Retry = *app.Spec.SyncPolicy.Retry
}

// It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g.
// auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an
// application in an infinite loop. To detect this, we only attempt the Sync if the revision
// and parameter overrides are different from our most recent sync operation.
if alreadyAttempted && (!selfHeal || !attemptPhase.Successful()) {
if !attemptPhase.Successful() {
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA)
message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message)
alreadyAttempted, lastAttemptedRevisions, lastAttemptedPhase := alreadyAttemptedSync(app, desiredRevisions, shouldCompareRevisions)
ts.AddCheckpoint("already_attempted_sync_ms")
if alreadyAttempted {
if !lastAttemptedPhase.Successful() {
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s and will not retry for %s", lastAttemptedRevisions, desiredRevisions)
message := fmt.Sprintf("Failed last sync attempt to %s: %s", lastAttemptedRevisions, app.Status.OperationState.Message)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
}
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
return nil, 0
} else if selfHeal {
shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app, alreadyAttempted)
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
if !app.Spec.SyncPolicy.Automated.SelfHeal {
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredRevisions)
return nil, 0
}
// Self heal will trigger a new sync operation when the desired state changes and cause the application to
// be OutOfSync when it was previously synced Successfully. This means SelfHeal should only ever be attempted
// when the revisions have not changed, and where the previous sync to these revision was successful

// Only carry SelfHealAttemptsCount to be increased when the selfHealBackoffCooldown has not elapsed yet
if !ctrl.selfHealBackoffCooldownElapsed(app) {
if app.Status.OperationState != nil && app.Status.OperationState.Operation.Sync != nil {
op.Sync.SelfHealAttemptsCount = app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount
}
}

if alreadyAttempted {
if !shouldSelfHeal {
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
return nil, 0
}
op.Sync.SelfHealAttemptsCount++
for _, resource := range resources {
if resource.Status != appv1.SyncStatusCodeSynced {
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
Kind: resource.Kind,
Group: resource.Group,
Name: resource.Name,
})
}
if remainingTime := ctrl.selfHealRemainingBackoff(app, int(op.Sync.SelfHealAttemptsCount)); remainingTime > 0 {
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", lastAttemptedRevisions, ctrl.selfHealTimeout, remainingTime)
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &remainingTime)
return nil, 0
}

op.Sync.SelfHealAttemptsCount++
for _, resource := range resources {
if resource.Status != appv1.SyncStatusCodeSynced {
op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
Kind: resource.Kind,
Group: resource.Group,
Name: resource.Name,
})
}
}
}
@@ -2197,7 +2214,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
}
}
if bAllNeedPrune {
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA)
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredRevisions)
logCtx.Warn(message)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
}
@@ -2213,62 +2230,65 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
if stderrors.Is(err, argo.ErrAnotherOperationInProgress) {
// skipping auto-sync because another operation is in progress and was not noticed due to stale data in informer
// it is safe to skip auto-sync because it is already running
logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredRevisions, err)
return nil, 0
}

logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredRevisions, err)
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
}
ctrl.writeBackToInformer(updatedApp)
ts.AddCheckpoint("write_back_to_informer_ms")

var target string
if updatedApp.Spec.HasMultipleSources() {
target = strings.Join(desiredCommitSHAsMS, ", ")
} else {
target = desiredCommitSHA
}
message := fmt.Sprintf("Initiated automated sync to '%s'", target)
message := fmt.Sprintf("Initiated automated sync to %s", desiredRevisions)
ctrl.logAppEvent(context.TODO(), app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: corev1.EventTypeNormal}, message)
logCtx.Info(message)
return nil, setOpTime
}

// alreadyAttemptedSync returns whether the most recent sync was performed against the
// commitSHA and with the same app source config which are currently set in the app.
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool, revisionUpdated bool) (bool, synccommon.OperationPhase) {
if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
return false, ""
// alreadyAttemptedSync returns whether the most recently synced revision(s) exactly match the given desiredRevisions
// and for the same application source. If the revision(s) have changed or the Application source configuration has been updated,
// it will return false, indicating that a new sync should be attempted.
// When newRevisionHasChanges is false, due to commits not having direct changes on the application, it will not compare the revision(s), but only the sources.
// It also returns the last synced revisions if any, and the result of that last sync operation.
func alreadyAttemptedSync(app *appv1.Application, desiredRevisions []string, newRevisionHasChanges bool) (bool, []string, synccommon.OperationPhase) {
if app.Status.OperationState == nil {
// The operation state may be removed when new operations are triggered
return false, []string{}, ""
}
if hasMultipleSources {
if revisionUpdated {
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
return false, ""
if app.Status.OperationState.SyncResult == nil {
// If the sync has completed without result, it is very likely that an error happened
// We don't want to resync with auto-sync indefinitely. We should have retried the configured amount of time already
// In this case, a manual action to restore the app may be required
log.WithFields(applog.GetAppLogFields(app)).Warn("Already attempted sync: sync does not have any results")
return app.Status.OperationState.Phase.Completed(), []string{}, app.Status.OperationState.Phase
}

if newRevisionHasChanges {
log.WithFields(applog.GetAppLogFields(app)).Infof("Already attempted sync: comparing synced revisions to %s", desiredRevisions)
if app.Spec.HasMultipleSources() {
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, desiredRevisions) {
return false, app.Status.OperationState.SyncResult.Revisions, app.Status.OperationState.Phase
}
} else {
log.WithFields(applog.GetAppLogFields(app)).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
if len(desiredRevisions) != 1 || app.Status.OperationState.SyncResult.Revision != desiredRevisions[0] {
return false, []string{app.Status.OperationState.SyncResult.Revision}, app.Status.OperationState.Phase
}
}
} else {
if revisionUpdated {
log.WithFields(applog.GetAppLogFields(app)).Infof("Executing compare of syncResult.Revision and commitSha because manifest changed: %v", commitSHA)
if app.Status.OperationState.SyncResult.Revision != commitSHA {
return false, ""
}
} else {
log.WithFields(applog.GetAppLogFields(app)).Debugf("Skipping auto-sync: commitSHA %s has no changes", commitSHA)
}
log.WithFields(applog.GetAppLogFields(app)).Debugf("Already attempted sync: revisions %s have no changes", desiredRevisions)
}

if hasMultipleSources {
return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.Phase
log.WithFields(applog.GetAppLogFields(app)).Debug("Already attempted sync: comparing sources")
if app.Spec.HasMultipleSources() {
return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.SyncResult.Revisions, app.Status.OperationState.Phase
}
return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase
return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), []string{app.Status.OperationState.SyncResult.Revision}, app.Status.OperationState.Phase
}

func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application, alreadyAttempted bool) (bool, time.Duration) {
func (ctrl *ApplicationController) selfHealRemainingBackoff(app *appv1.Application, selfHealAttemptsCount int) time.Duration {
if app.Status.OperationState == nil {
return true, time.Duration(0)
return time.Duration(0)
}

var timeSinceOperation *time.Duration
@@ -2276,34 +2296,41 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application, alread
timeSinceOperation = ptr.To(time.Since(app.Status.OperationState.FinishedAt.Time))
}

// Reset counter if the prior sync was successful and the cooldown period is over OR if the revision has changed
if !alreadyAttempted || (timeSinceOperation != nil && *timeSinceOperation >= ctrl.selfHealBackoffCooldown && app.Status.Sync.Status == appv1.SyncStatusCodeSynced) {
app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = 0
}

var retryAfter time.Duration
if ctrl.selfHealBackOff == nil {
if ctrl.selfHealBackoff == nil {
if timeSinceOperation == nil {
retryAfter = ctrl.selfHealTimeout
} else {
retryAfter = ctrl.selfHealTimeout - *timeSinceOperation
}
} else {
backOff := *ctrl.selfHealBackOff
backOff.Steps = int(app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
backOff := *ctrl.selfHealBackoff
backOff.Steps = selfHealAttemptsCount
var delay time.Duration
steps := backOff.Steps
for i := 0; i < steps; i++ {
delay = backOff.Step()
}

if timeSinceOperation == nil {
retryAfter = delay
} else {
retryAfter = delay - *timeSinceOperation
}
}
return retryAfter <= 0, retryAfter
return retryAfter
}

// selfHealBackoffCooldownElapsed returns true when the last successful sync has occurred since longer
// than then self heal cooldown. This means that the application has been in sync for long enough to
// reset the self healing backoff to its initial state
func (ctrl *ApplicationController) selfHealBackoffCooldownElapsed(app *appv1.Application) bool {
if app.Status.OperationState == nil || app.Status.OperationState.FinishedAt == nil {
// Something is in progress, or about to be. In that case, selfHeal attempt should be zero anyway
return true
}

timeSinceLastOperation := time.Since(app.Status.OperationState.FinishedAt.Time)
return timeSinceLastOperation >= ctrl.selfHealBackoffCooldown && app.Status.OperationState.Phase.Successful()
}

// isAppNamespaceAllowed returns whether the application is allowed in the
@@ -95,10 +95,10 @@ func (m *MockKubectl) DeleteResource(ctx context.Context, config *rest.Config, g
}

func newFakeController(data *fakeData, repoErr error) *ApplicationController {
return newFakeControllerWithResync(data, time.Minute, repoErr)
return newFakeControllerWithResync(data, time.Minute, repoErr, nil)
}

func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration, repoErr error) *ApplicationController {
func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration, repoErr, revisionPathsErr error) *ApplicationController {
var clust corev1.Secret
err := yaml.Unmarshal([]byte(fakeCluster), &clust)
if err != nil {
@@ -124,7 +124,11 @@ func newFakeControllerWithResync(data *fakeData, appResyncPeriod time.Duration,
}
}

mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(data.updateRevisionForPathsResponse, nil)
if revisionPathsErr != nil {
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(nil, revisionPathsErr)
} else {
mockRepoClient.On("UpdateRevisionForPaths", mock.Anything, mock.Anything).Return(data.updateRevisionForPathsResponse, nil)
}

mockRepoClientset := mockrepoclient.Clientset{RepoServerServiceClient: &mockRepoClient}

@@ -344,10 +348,13 @@ status:
- cccccccccccccccccccccccccccccccccccccccc
sources:
- path: some/path
  helm:
    valueFiles:
    - $values_test/values.yaml
  repoURL: https://github.com/argoproj/argocd-example-apps.git
- path: some/other/path
  repoURL: https://github.com/argoproj/argocd-example-apps-fake.git
- path: some/other/path
- ref: values_test
  repoURL: https://github.com/argoproj/argocd-example-apps-fake-ref.git
`
@@ -621,13 +628,13 @@ func TestAutoSyncEnabledSetToTrue(t *testing.T) {
assert.False(t, app.Operation.Sync.Prune)
}

func TestMultiSourceSelfHeal(t *testing.T) {
func TestAutoSyncMultiSourceWithoutSelfHeal(t *testing.T) {
// Simulate OutOfSync caused by object change in cluster
// So our Sync Revisions and SyncStatus Revisions should deep equal
t.Run("ClusterObjectChangeShouldNotTriggerAutoSync", func(t *testing.T) {
app := newFakeMultiSourceApp()
app.Spec.SyncPolicy.Automated.SelfHeal = false
app.Status.Sync.Revisions = []string{"z", "x", "v"}
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
@@ -639,15 +646,14 @@ func TestMultiSourceSelfHeal(t *testing.T) {
require.NoError(t, err)
assert.Nil(t, app.Operation)
})

t.Run("NewRevisionChangeShouldTriggerAutoSync", func(t *testing.T) {
app := newFakeMultiSourceApp()
app.Spec.SyncPolicy.Automated.SelfHeal = false
app.Status.Sync.Revisions = []string{"a", "b", "c"}
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revisions: []string{"z", "x", "v"},
Revisions: []string{"a", "b", "c"},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook-1", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.Nil(t, cond)
@@ -790,6 +796,30 @@ func TestSkipAutoSync(t *testing.T) {
assert.Nil(t, app.Operation)
})

t.Run("PreviousSyncAttemptError", func(t *testing.T) {
app := newFakeApp()
app.Status.OperationState = &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
},
Phase: synccommon.OperationError,
SyncResult: &v1alpha1.SyncOperationResult{
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
Source: *app.Spec.Source.DeepCopy(),
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.NotNil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
assert.Nil(t, app.Operation)
})

t.Run("NeedsToPruneResourcesOnlyButAutomatedPruneDisabled", func(t *testing.T) {
app := newFakeApp()
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
@@ -844,45 +874,78 @@ func TestAutoSyncIndicateError(t *testing.T) {

// TestAutoSyncParameterOverrides verifies we auto-sync if revision is same but parameter overrides are different
func TestAutoSyncParameterOverrides(t *testing.T) {
app := newFakeApp()
app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "1",
t.Run("Single source", func(t *testing.T) {
app := newFakeApp()
app.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "1",
},
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
}
app.Status.OperationState = &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "2", // this value changed
}
app.Status.OperationState = &v1alpha1.OperationState{
Operation: v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Source: &v1alpha1.ApplicationSource{
Helm: &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "2", // this value changed
},
},
},
},
},
},
},
Phase: synccommon.OperationFailed,
SyncResult: &v1alpha1.SyncOperationResult{
Phase: synccommon.OperationFailed,
SyncResult: &v1alpha1.SyncOperationResult{
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
}
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
assert.NotNil(t, app.Operation)
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
assert.NotNil(t, app.Operation)
})

t.Run("Multi sources", func(t *testing.T) {
app := newFakeMultiSourceApp()
app.Spec.Sources[0].Helm = &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "1",
},
},
}
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}}, nil)
app.Status.OperationState.SyncResult.Revisions = []string{"z", "x", "v"}
app.Status.OperationState.SyncResult.Sources[0].Helm = &v1alpha1.ApplicationSourceHelm{
Parameters: []v1alpha1.HelmParameter{
{
Name: "a",
Value: "2", // this value changed
},
},
}
syncStatus := v1alpha1.SyncStatus{
Status: v1alpha1.SyncStatusCodeOutOfSync,
Revisions: []string{"z", "x", "v"},
}
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}}, true)
assert.Nil(t, cond)
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(t.Context(), "my-app", metav1.GetOptions{})
require.NoError(t, err)
assert.NotNil(t, app.Operation)
})
}

// TestFinalizeAppDeletion verifies application deletion
@@ -1310,6 +1373,9 @@ func TestGetResourceTree_HasOrphanedResources(t *testing.T) {

managedDeploy := v1alpha1.ResourceNode{
ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "nginx-deployment", Version: "v1"},
Health: &v1alpha1.HealthStatus{
Status: health.HealthStatusMissing,
},
}
orphanedDeploy1 := v1alpha1.ResourceNode{
ResourceRef: v1alpha1.ResourceRef{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deploy1"},
@@ -1862,7 +1928,7 @@ apps/Deployment:
hs = {}
hs.status = ""
hs.message = ""

if obj.metadata ~= nil then
if obj.metadata.labels ~= nil then
current_status = obj.metadata.labels["status"]
@@ -1898,7 +1964,7 @@ apps/Deployment:
{},
{},
},
}, time.Millisecond*10, nil)
}, time.Millisecond*10, nil, nil)

testCases := []struct {
name string
@@ -2030,7 +2096,9 @@ func TestProcessRequestedAppOperation_FailedNoRetries(t *testing.T) {
ctrl.processRequestedAppOperation(app)

phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationError), phase)
assert.Equal(t, "Failed to load application project: error getting app project \"default\": appproject.argoproj.io \"default\" not found", message)
}

func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
@@ -2059,8 +2127,8 @@ func TestProcessRequestedAppOperation_InvalidDestination(t *testing.T) {
ctrl.processRequestedAppOperation(app)

phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
assert.Equal(t, string(synccommon.OperationFailed), phase)
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationError), phase)
assert.Contains(t, message, "application destination can't have both name and server defined: another-cluster https://localhost:6443")
}

@@ -2084,20 +2152,24 @@ func TestProcessRequestedAppOperation_FailedHasRetries(t *testing.T) {
ctrl.processRequestedAppOperation(app)

phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
assert.Equal(t, string(synccommon.OperationRunning), phase)
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Contains(t, message, "Retrying attempt #1")
retryCount, _, _ := unstructured.NestedFloat64(receivedPatch, "status", "operationState", "retryCount")
assert.Equal(t, string(synccommon.OperationRunning), phase)
assert.Contains(t, message, "Failed to load application project: error getting app project \"invalid-project\": appproject.argoproj.io \"invalid-project\" not found. Retrying attempt #1")
assert.InEpsilon(t, float64(1), retryCount, 0.0001)
}

func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
failedAttemptFinisedAt := time.Now().Add(-time.Minute * 5)
app := newFakeApp()
app.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
Retry: v1alpha1.RetryStrategy{Limit: 1},
}
app.Status.OperationState.Operation = *app.Operation
app.Status.OperationState.Phase = synccommon.OperationRunning
app.Status.OperationState.RetryCount = 1
app.Status.OperationState.FinishedAt = &metav1.Time{Time: failedAttemptFinisedAt}
app.Status.OperationState.SyncResult.Resources = []*v1alpha1.ResourceResult{{
Name: "guestbook",
Kind: "Deployment",
@@ -2127,7 +2199,58 @@ func TestProcessRequestedAppOperation_RunningPreviouslyFailed(t *testing.T) {
ctrl.processRequestedAppOperation(app)

phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
finishedAtStr, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "finishedAt")
finishedAt, err := time.Parse(time.RFC3339, finishedAtStr)
require.NoError(t, err)
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
assert.Equal(t, "successfully synced (no more tasks)", message)
assert.Truef(t, finishedAt.After(failedAttemptFinisedAt), "finishedAt was expected to be updated. The retry was not performed.")
}

func TestProcessRequestedAppOperation_RunningPreviouslyFailedBackoff(t *testing.T) {
failedAttemptFinisedAt := time.Now().Add(-time.Second)
app := newFakeApp()
app.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{},
Retry: v1alpha1.RetryStrategy{
Limit: 1,
Backoff: &v1alpha1.Backoff{
Duration: "1h",
Factor: ptr.To(int64(100)),
MaxDuration: "1h",
},
},
}
app.Status.OperationState.Operation = *app.Operation
app.Status.OperationState.Phase = synccommon.OperationRunning
app.Status.OperationState.Message = "pending retry"
app.Status.OperationState.RetryCount = 1
app.Status.OperationState.FinishedAt = &metav1.Time{Time: failedAttemptFinisedAt}
app.Status.OperationState.SyncResult.Resources = []*v1alpha1.ResourceResult{{
Name: "guestbook",
Kind: "Deployment",
Group: "apps",
Status: synccommon.ResultCodeSyncFailed,
}}

data := &fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponse: &apiclient.ManifestResponse{
Manifests: []string{},
Namespace: test.FakeDestNamespace,
Server: test.FakeClusterURL,
Revision: "abc123",
},
}
ctrl := newFakeController(data, nil)
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
fakeAppCs.PrependReactor("patch", "*", func(_ kubetesting.Action) (handled bool, ret runtime.Object, err error) {
require.FailNow(t, "A patch should not have been called if the backoff has not passed")
return true, &v1alpha1.Application{}, nil
})

ctrl.processRequestedAppOperation(app)
}

func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
@@ -2136,6 +2259,7 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
Sync: &v1alpha1.SyncOperation{},
Retry: v1alpha1.RetryStrategy{Limit: 10},
}
app.Status.OperationState.Operation = *app.Operation
app.Status.OperationState.Phase = synccommon.OperationTerminating

data := &fakeData{
@@ -2160,7 +2284,9 @@ func TestProcessRequestedAppOperation_HasRetriesTerminated(t *testing.T) {
ctrl.processRequestedAppOperation(app)

phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationFailed), phase)
assert.Equal(t, "Operation terminated", message)
}

func TestProcessRequestedAppOperation_Successful(t *testing.T) {
@@ -2187,12 +2313,91 @@ func TestProcessRequestedAppOperation_Successful(t *testing.T) {
ctrl.processRequestedAppOperation(app)

phase, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "phase")
message, _, _ := unstructured.NestedString(receivedPatch, "status", "operationState", "message")
assert.Equal(t, string(synccommon.OperationSucceeded), phase)
assert.Equal(t, "successfully synced (no more tasks)", message)
ok, level := ctrl.isRefreshRequested(ctrl.toAppKey(app.Name))
assert.True(t, ok)
assert.Equal(t, CompareWithLatestForceResolve, level)
}

func TestProcessRequestedAppOperation_SyncTimeout(t *testing.T) {
testCases := []struct {
name string
startedSince time.Duration
syncTimeout time.Duration
retryAttempt int
currentPhase synccommon.OperationPhase
expectedPhase synccommon.OperationPhase
expectedMessage string
}{{
name: "Continue when running operation has not exceeded timeout",
syncTimeout: time.Minute,
startedSince: 30 * time.Second,
currentPhase: synccommon.OperationRunning,
expectedPhase: synccommon.OperationSucceeded,
expectedMessage: "successfully synced (no more tasks)",
}, {
name: "Continue when terminating operation has exceeded timeout",
syncTimeout: time.Minute,
startedSince: 2 * time.Minute,
currentPhase: synccommon.OperationTerminating,
expectedPhase: synccommon.OperationFailed,
expectedMessage: "Operation terminated",
}, {
name: "Terminate when running operation exceeded timeout",
syncTimeout: time.Minute,
startedSince: 2 * time.Minute,
currentPhase: synccommon.OperationRunning,
expectedPhase: synccommon.OperationFailed,
expectedMessage: "Operation terminated, triggered by controller sync timeout",
}, {
name: "Terminate when retried operation exceeded timeout",
syncTimeout: time.Minute,
startedSince: 15 * time.Minute,
currentPhase: synccommon.OperationRunning,
retryAttempt: 1,
expectedPhase: synccommon.OperationFailed,
expectedMessage: "Operation terminated, triggered by controller sync timeout (retried 1 times).",
}}
for i := range testCases {
tc := testCases[i]
t.Run(fmt.Sprintf("case %d: %s", i, tc.name), func(t *testing.T) {
app := newFakeApp()
app.Spec.Project = "default"
app.Operation = &v1alpha1.Operation{
Sync: &v1alpha1.SyncOperation{
Revision: "HEAD",
},
}
ctrl := newFakeController(&fakeData{
apps: []runtime.Object{app, &defaultProj},
manifestResponses: []*apiclient.ManifestResponse{{
Manifests: []string{},
}},
}, nil)

ctrl.syncTimeout = tc.syncTimeout
app.Status.OperationState = &v1alpha1.OperationState{
Operation: *app.Operation,
Phase: tc.currentPhase,
StartedAt: metav1.NewTime(time.Now().Add(-tc.startedSince)),
}
if tc.retryAttempt > 0 {
app.Status.OperationState.FinishedAt = ptr.To(metav1.NewTime(time.Now().Add(-tc.startedSince)))
app.Status.OperationState.RetryCount = int64(tc.retryAttempt)
}

ctrl.processRequestedAppOperation(app)

app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.ObjectMeta.Namespace).Get(t.Context(), app.Name, metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, tc.expectedPhase, app.Status.OperationState.Phase)
assert.Equal(t, tc.expectedMessage, app.Status.OperationState.Message)
})
}
}

func TestGetAppHosts(t *testing.T) {
app := newFakeApp()
data := &fakeData{
@@ -2459,35 +2664,71 @@ func TestAppStatusIsReplaced(t *testing.T) {

func TestAlreadyAttemptSync(t *testing.T) {
app := newFakeApp()
defaultRevision := app.Status.OperationState.SyncResult.Revision

t.Run("no operation state", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState = nil
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
assert.False(t, attempted)
})

t.Run("no sync operation", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.Operation.Sync = nil
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
assert.False(t, attempted)
})

t.Run("no sync result", func(t *testing.T) {
t.Run("no sync result for running sync", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult = nil
attempted, _ := alreadyAttemptedSync(app, "", []string{}, false, false)
app.Status.OperationState.Phase = synccommon.OperationRunning
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
assert.False(t, attempted)
})

t.Run("no sync result for completed sync", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult = nil
app.Status.OperationState.Phase = synccommon.OperationError
attempted, _, _ := alreadyAttemptedSync(app, []string{defaultRevision}, true)
assert.True(t, attempted)
})

t.Run("single source", func(t *testing.T) {
t.Run("same manifest with sync result", func(t *testing.T) {
attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, false)
t.Run("no revision", func(t *testing.T) {
attempted, _, _ := alreadyAttemptedSync(app, []string{}, true)
assert.False(t, attempted)
})

t.Run("empty revision", func(t *testing.T) {
attempted, _, _ := alreadyAttemptedSync(app, []string{""}, true)
assert.False(t, attempted)
})

t.Run("too many revision", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revision = "sha"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha", "sha2"}, true)
assert.False(t, attempted)
})

t.Run("same manifest, same SHA with changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revision = "sha"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, true)
assert.True(t, attempted)
})

t.Run("same manifest with sync result different targetRevision, same SHA", func(t *testing.T) {
t.Run("same manifest, different SHA with changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revision = "sha1"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, true)
assert.False(t, attempted)
})

t.Run("same manifest, different SHA without changes", func(t *testing.T) {
app := app.DeepCopy()
app.Status.OperationState.SyncResult.Revision = "sha1"
attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, false)
assert.True(t, attempted)
})

t.Run("different manifest, same SHA with changes", func(t *testing.T) {
// This test represents the case where the user changed a source's target revision to a new branch, but it
// points to the same revision as the old branch. We currently do not consider this as having been "already
// attempted." In the future we may want to short-circuit the auto-sync in these cases.
@@ -2495,55 +2736,101 @@ func TestAlreadyAttemptSync(t *testing.T) {
            app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{TargetRevision: "branch1"}
            app.Spec.Source = &v1alpha1.ApplicationSource{TargetRevision: "branch2"}
            app.Status.OperationState.SyncResult.Revision = "sha"
            attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, false)
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, true)
            assert.False(t, attempted)
        })

        t.Run("different manifest with sync result, different SHA", func(t *testing.T) {
        t.Run("different manifest, different SHA with changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
            app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
            app.Status.OperationState.SyncResult.Revision = "sha1"
            attempted, _ := alreadyAttemptedSync(app, "sha2", []string{}, false, true)
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, true)
            assert.False(t, attempted)
        })

        t.Run("different manifest with sync result, same SHA", func(t *testing.T) {
        t.Run("different manifest, different SHA without changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
            app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
            app.Status.OperationState.SyncResult.Revision = "sha1"
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha2"}, false)
            assert.False(t, attempted)
        })

        t.Run("different manifest, same SHA without changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Source = v1alpha1.ApplicationSource{Path: "folder1"}
            app.Spec.Source = &v1alpha1.ApplicationSource{Path: "folder2"}
            app.Status.OperationState.SyncResult.Revision = "sha"
            attempted, _ := alreadyAttemptedSync(app, "sha", []string{}, false, true)
            assert.True(t, attempted)
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha"}, false)
            assert.False(t, attempted)
        })
    })

    t.Run("multi-source", func(t *testing.T) {
        t.Run("same manifest with sync result", func(t *testing.T) {
            attempted, _ := alreadyAttemptedSync(app, "", []string{"sha"}, true, false)
        app := app.DeepCopy()
        app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
        app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}

        t.Run("same manifest, same SHAs with changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b"}, true)
            assert.True(t, attempted)
        })

        t.Run("same manifest with sync result, different targetRevision, same SHA", func(t *testing.T) {
        t.Run("same manifest, different SHAs with changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, true)
            assert.False(t, attempted)
        })

        t.Run("same manifest, different SHA without changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, false)
            assert.True(t, attempted)
        })

        t.Run("different manifest, same SHA with changes", func(t *testing.T) {
            // This test represents the case where the user changed a source's target revision to a new branch, but it
            // points to the same revision as the old branch. We currently do not consider this as having been "already
            // attempted." In the future we may want to short-circuit the auto-sync in these cases.
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}}
            app.Spec.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch2"}}
            app.Status.OperationState.SyncResult.Revisions = []string{"sha"}
            attempted, _ := alreadyAttemptedSync(app, "", []string{"sha"}, true, false)
            app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}, {TargetRevision: "branch2"}}
            app.Spec.Sources = []v1alpha1.ApplicationSource{{TargetRevision: "branch1"}, {TargetRevision: "branch3"}}
            app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_2", "sha_b_2"}
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a_2", "sha_b_2"}, false)
            assert.False(t, attempted)
        })

        t.Run("different manifest with sync result, different SHAs", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Revisions = []string{"sha_a_=", "sha_b_1"}
            attempted, _ := alreadyAttemptedSync(app, "", []string{"sha_a_2", "sha_b_2"}, true, true)
            assert.False(t, attempted)
        })

        t.Run("different manifest with sync result, same SHAs", func(t *testing.T) {
        t.Run("different manifest, different SHA with changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
            app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
            app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
            attempted, _ := alreadyAttemptedSync(app, "", []string{"sha_a", "sha_b"}, true, true)
            assert.True(t, attempted)
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b_2"}, true)
            assert.False(t, attempted)
        })

        t.Run("different manifest, different SHA without changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
            app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
            app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b_2"}, false)
            assert.False(t, attempted)
        })

        t.Run("different manifest, same SHA without changes", func(t *testing.T) {
            app := app.DeepCopy()
            app.Status.OperationState.SyncResult.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder2"}}
            app.Spec.Sources = []v1alpha1.ApplicationSource{{Path: "folder1"}, {Path: "folder3"}}
            app.Status.OperationState.SyncResult.Revisions = []string{"sha_a", "sha_b"}
            attempted, _, _ := alreadyAttemptedSync(app, []string{"sha_a", "sha_b"}, false)
            assert.False(t, attempted)
        })
    })
}
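Taken together, the subtests above pin down when a sync counts as "already attempted" under the new three-argument signature. The following is only my reading of those expectations, not the controller's actual implementation; the parameter and helper names (revisionHasChanges, alreadyAttemptedSketch) are assumptions, and the snippet assumes the test file's existing imports plus reflect.

// alreadyAttemptedSketch: a sync is treated as already attempted when the sources match
// the last SyncResult and, if the manifests changed, the requested revisions match too.
func alreadyAttemptedSketch(app *v1alpha1.Application, revisions []string, revisionHasChanges bool) bool {
    state := app.Status.OperationState
    if state == nil || state.Operation.Sync == nil {
        return false
    }
    if state.SyncResult == nil {
        return state.Phase.Completed() // completed but result lost: treat as attempted
    }
    // Normalize single-source and multi-source results into one revision slice.
    syncedRevisions := state.SyncResult.Revisions
    if len(syncedRevisions) == 0 {
        syncedRevisions = []string{state.SyncResult.Revision}
    }
    if len(revisions) == 0 || len(revisions) != len(syncedRevisions) {
        return false
    }
    for _, r := range revisions {
        if r == "" {
            return false
        }
    }
    // Sources must be unchanged; with manifest changes the SHAs must match as well.
    var sourcesEqual bool
    if app.Spec.HasMultipleSources() {
        sourcesEqual = reflect.DeepEqual(app.Spec.Sources, state.SyncResult.Sources)
    } else {
        sourcesEqual = reflect.DeepEqual(app.Spec.GetSource(), state.SyncResult.Source)
    }
    if !sourcesEqual {
        return false
    }
    if !revisionHasChanges {
        return true
    }
    return reflect.DeepEqual(revisions, syncedRevisions)
}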
@@ -2555,14 +2842,13 @@ func assertDurationAround(t *testing.T, expected time.Duration, actual time.Dura
    assert.LessOrEqual(t, expected, actual+delta)
}

func TestSelfHealExponentialBackoff(t *testing.T) {
func TestSelfHealRemainingBackoff(t *testing.T) {
    ctrl := newFakeController(&fakeData{}, nil)
    ctrl.selfHealBackOff = &wait.Backoff{
    ctrl.selfHealBackoff = &wait.Backoff{
        Factor: 3,
        Duration: 2 * time.Second,
        Cap: 2 * time.Minute,
    }

    app := &v1alpha1.Application{
        Status: v1alpha1.ApplicationStatus{
            OperationState: &v1alpha1.OperationState{
@@ -2574,156 +2860,108 @@ func TestSelfHealExponentialBackoff(t *testing.T) {
    }

    testCases := []struct {
        attempts int64
        expectedAttempts int64
        attempts int
        finishedAt *metav1.Time
        expectedDuration time.Duration
        shouldSelfHeal bool
        alreadyAttempted bool
        syncStatus v1alpha1.SyncStatusCode
    }{{
        attempts: 0,
        finishedAt: ptr.To(metav1.Now()),
        expectedDuration: 0,
        shouldSelfHeal: true,
        alreadyAttempted: true,
        expectedAttempts: 0,
        syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
    }, {
        attempts: 1,
        finishedAt: ptr.To(metav1.Now()),
        expectedDuration: 2 * time.Second,
        shouldSelfHeal: false,
        alreadyAttempted: true,
        expectedAttempts: 1,
        syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
    }, {
        attempts: 2,
        finishedAt: ptr.To(metav1.Now()),
        expectedDuration: 6 * time.Second,
        shouldSelfHeal: false,
        alreadyAttempted: true,
        expectedAttempts: 2,
        syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
    }, {
        attempts: 3,
        finishedAt: nil,
        expectedDuration: 18 * time.Second,
        shouldSelfHeal: false,
        alreadyAttempted: true,
        expectedAttempts: 3,
        syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
    }, {
        attempts: 4,
        finishedAt: nil,
        expectedDuration: 54 * time.Second,
        shouldSelfHeal: false,
        alreadyAttempted: true,
        expectedAttempts: 4,
        syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
    }, {
        attempts: 5,
        finishedAt: nil,
        expectedDuration: 120 * time.Second,
        shouldSelfHeal: false,
        alreadyAttempted: true,
        expectedAttempts: 5,
        syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
    }, {
        attempts: 6,
        finishedAt: nil,
        expectedDuration: 120 * time.Second,
        shouldSelfHeal: false,
        alreadyAttempted: true,
        expectedAttempts: 6,
        syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
    }, {
        attempts: 6,
        finishedAt: nil,
        expectedDuration: 0,
        shouldSelfHeal: true,
        alreadyAttempted: false,
        expectedAttempts: 0,
        syncStatus: v1alpha1.SyncStatusCodeOutOfSync,
    }, { // backoff will not reset as finished time isn't >= cooldown
        attempts: 6,
        finishedAt: ptr.To(metav1.Now()),
        expectedDuration: 120 * time.Second,
        shouldSelfHeal: false,
        alreadyAttempted: true,
        expectedAttempts: 6,
        syncStatus: v1alpha1.SyncStatusCodeSynced,
    }, { // backoff will reset as finished time is >= cooldown
    }, {
        attempts: 40,
        finishedAt: &metav1.Time{Time: time.Now().Add(-(1 * time.Minute))},
        expectedDuration: -60 * time.Second,
        shouldSelfHeal: true,
        alreadyAttempted: true,
        expectedAttempts: 0,
        syncStatus: v1alpha1.SyncStatusCodeSynced,
        finishedAt: &metav1.Time{Time: time.Now().Add(-1 * time.Minute)},
        expectedDuration: 60 * time.Second,
        shouldSelfHeal: false,
    }}

    for i := range testCases {
        tc := testCases[i]
        t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
            app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount = tc.attempts
            app.Status.OperationState.FinishedAt = tc.finishedAt
            app.Status.Sync.Status = tc.syncStatus
            ok, duration := ctrl.shouldSelfHeal(app, tc.alreadyAttempted)
            require.Equal(t, ok, tc.shouldSelfHeal)
            require.Equal(t, tc.expectedAttempts, app.Status.OperationState.Operation.Sync.SelfHealAttemptsCount)
            duration := ctrl.selfHealRemainingBackoff(app, tc.attempts)
            shouldSelfHeal := duration <= 0
            require.Equal(t, tc.shouldSelfHeal, shouldSelfHeal)
            assertDurationAround(t, tc.expectedDuration, duration)
        })
    }
}
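The expectedDuration column of the new test follows a geometric progression from the wait.Backoff above (2s, 6s, 18s, 54s, then capped at 120s), reduced by however long ago the last operation finished. A back-of-the-envelope sketch of that arithmetic, inferred purely from the table and not from the controller's code:

// remainingBackoffSketch mirrors the table above: Duration * Factor^(attempts-1),
// capped at Cap, minus the time already elapsed since the operation finished.
func remainingBackoffSketch(attempts int, finishedAt *metav1.Time, now time.Time) time.Duration {
    if attempts == 0 {
        return 0
    }
    backoff := 2 * time.Second
    for i := 1; i < attempts; i++ {
        backoff *= 3
        if backoff >= 2*time.Minute {
            backoff = 2 * time.Minute
            break
        }
    }
    if finishedAt != nil {
        backoff -= now.Sub(finishedAt.Time) // e.g. 120s - 60s = 60s for the last case
    }
    return backoff
}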

func TestSyncTimeout(t *testing.T) {
    testCases := []struct {
        delta time.Duration
        expectedPhase synccommon.OperationPhase
        expectedMessage string
    }{{
        delta: 2 * time.Minute,
        expectedPhase: synccommon.OperationFailed,
        expectedMessage: "Operation terminated",
    }, {
        delta: 30 * time.Second,
        expectedPhase: synccommon.OperationSucceeded,
        expectedMessage: "successfully synced (no more tasks)",
    }}
    for i := range testCases {
        tc := testCases[i]
        t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
            app := newFakeApp()
            app.Spec.Project = "default"
            app.Operation = &v1alpha1.Operation{
                Sync: &v1alpha1.SyncOperation{
                    Revision: "HEAD",
                },
            }
            ctrl := newFakeController(&fakeData{
                apps: []runtime.Object{app, &defaultProj},
                manifestResponses: []*apiclient.ManifestResponse{{
                    Manifests: []string{},
                }},
            }, nil)
func TestSelfHealBackoffCooldownElapsed(t *testing.T) {
    cooldown := time.Second * 30
    ctrl := newFakeController(&fakeData{}, nil)
    ctrl.selfHealBackoffCooldown = cooldown

            ctrl.syncTimeout = time.Minute
            app.Status.OperationState = &v1alpha1.OperationState{
                Operation: v1alpha1.Operation{
                    Sync: &v1alpha1.SyncOperation{
                        Revision: "HEAD",
                    },
                },
                Phase: synccommon.OperationRunning,
                StartedAt: metav1.NewTime(time.Now().Add(-tc.delta)),
            }
            ctrl.processRequestedAppOperation(app)

            app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.ObjectMeta.Namespace).Get(t.Context(), app.Name, metav1.GetOptions{})
            require.NoError(t, err)
            require.Equal(t, tc.expectedPhase, app.Status.OperationState.Phase)
            require.Equal(t, tc.expectedMessage, app.Status.OperationState.Message)
        })
    app := &v1alpha1.Application{
        Status: v1alpha1.ApplicationStatus{
            OperationState: &v1alpha1.OperationState{
                Phase: synccommon.OperationSucceeded,
            },
        },
    }

    t.Run("operation not completed", func(t *testing.T) {
        app := app.DeepCopy()
        app.Status.OperationState.FinishedAt = nil
        elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
        assert.True(t, elapsed)
    })

    t.Run("successful operation finished after cooldown", func(t *testing.T) {
        app := app.DeepCopy()
        app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now().Add(-cooldown)}
        elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
        assert.True(t, elapsed)
    })

    t.Run("unsuccessful operation finished after cooldown", func(t *testing.T) {
        app := app.DeepCopy()
        app.Status.OperationState.Phase = synccommon.OperationFailed
        app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now().Add(-cooldown)}
        elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
        assert.False(t, elapsed)
    })

    t.Run("successful operation finished before cooldown", func(t *testing.T) {
        app := app.DeepCopy()
        app.Status.OperationState.FinishedAt = &metav1.Time{Time: time.Now()}
        elapsed := ctrl.selfHealBackoffCooldownElapsed(app)
        assert.False(t, elapsed)
    })
}
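Read together, the four subtests pin down the cooldown rule: the cooldown counts as elapsed when there is no finished operation yet, or when the last operation succeeded and finished at least selfHealBackoffCooldown ago. A hedged sketch of that predicate, the shape is assumed and not copied from the controller:

// cooldownElapsedSketch reflects the expectations above: no FinishedAt means the
// cooldown does not apply; otherwise the last operation must have succeeded and
// finished at least `cooldown` ago.
func cooldownElapsedSketch(state *v1alpha1.OperationState, cooldown time.Duration, now time.Time) bool {
    if state == nil || state.FinishedAt == nil {
        return true
    }
    if state.Phase != synccommon.OperationSucceeded {
        return false
    }
    return now.Sub(state.FinishedAt.Time) >= cooldown
}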

controller/cache/cache.go (vendored): 13 changes

@@ -137,8 +137,6 @@ type LiveStateCache interface {
    IsNamespaced(server *appv1.Cluster, gk schema.GroupKind) (bool, error)
    // Returns synced cluster cache
    GetClusterCache(server *appv1.Cluster) (clustercache.ClusterCache, error)
    // Executes give callback against resource specified by the key and all its children
    IterateHierarchy(server *appv1.Cluster, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
    // Executes give callback against resources specified by the keys and all its children
    IterateHierarchyV2(server *appv1.Cluster, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
    // Returns state of live nodes which correspond for target nodes of specified application.
@@ -669,17 +667,6 @@ func (c *liveStateCache) IsNamespaced(server *appv1.Cluster, gk schema.GroupKind
    return clusterInfo.IsNamespaced(gk)
}

func (c *liveStateCache) IterateHierarchy(server *appv1.Cluster, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error {
    clusterInfo, err := c.getSyncedCluster(server)
    if err != nil {
        return err
    }
    clusterInfo.IterateHierarchy(key, func(resource *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) bool {
        return action(asResourceNode(resource), getApp(resource, namespaceResources))
    })
    return nil
}

func (c *liveStateCache) IterateHierarchyV2(server *appv1.Cluster, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error {
    clusterInfo, err := c.getSyncedCluster(server)
    if err != nil {
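The hunk above is truncated by the viewer, but the change itself is clear: the single-key IterateHierarchy variant is removed from LiveStateCache, leaving only IterateHierarchyV2, which takes a slice of keys. A rough usage sketch of the surviving method; the cluster and key values are placeholders and the helper name is mine:

// collectChildren walks the hierarchy of one resource via the remaining V2 method.
func collectChildren(cache LiveStateCache, cluster *appv1.Cluster, key kube.ResourceKey) ([]appv1.ResourceNode, error) {
    var nodes []appv1.ResourceNode
    err := cache.IterateHierarchyV2(cluster, []kube.ResourceKey{key}, func(child appv1.ResourceNode, _ string) bool {
        nodes = append(nodes, child)
        return true // keep walking the hierarchy
    })
    return nodes, err
}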
controller/cache/cache_test.go (vendored): 100 changes

@@ -1,6 +1,7 @@
package cache

import (
    "context"
    "errors"
    "net"
    "net/url"
@@ -39,6 +40,36 @@ func (n netError) Error() string { return string(n) }
func (n netError) Timeout() bool { return false }
func (n netError) Temporary() bool { return false }

func fixtures(data map[string]string, opts ...func(secret *corev1.Secret)) (*fake.Clientset, *argosettings.SettingsManager) {
    cm := &corev1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name: common.ArgoCDConfigMapName,
            Namespace: "default",
            Labels: map[string]string{
                "app.kubernetes.io/part-of": "argocd",
            },
        },
        Data: data,
    }
    secret := &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name: common.ArgoCDSecretName,
            Namespace: "default",
            Labels: map[string]string{
                "app.kubernetes.io/part-of": "argocd",
            },
        },
        Data: map[string][]byte{},
    }
    for i := range opts {
        opts[i](secret)
    }
    kubeClient := fake.NewClientset(cm, secret)
    settingsManager := argosettings.NewSettingsManager(context.Background(), kubeClient, "default")

    return kubeClient, settingsManager
}

func TestHandleModEvent_HasChanges(_ *testing.T) {
    clusterCache := &mocks.ClusterCache{}
    clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
@@ -745,3 +776,72 @@ func Test_GetVersionsInfo_error_redacted(t *testing.T) {
    require.Error(t, err)
    assert.NotContains(t, err.Error(), "password")
}

func TestLoadCacheSettings(t *testing.T) {
    _, settingsManager := fixtures(map[string]string{
        "application.instanceLabelKey": "testLabel",
        "application.resourceTrackingMethod": string(appv1.TrackingMethodLabel),
        "installationID": "123456789",
    })
    ch := liveStateCache{
        settingsMgr: settingsManager,
    }
    label, err := settingsManager.GetAppInstanceLabelKey()
    require.NoError(t, err)
    trackingMethod, err := settingsManager.GetTrackingMethod()
    require.NoError(t, err)
    res, err := ch.loadCacheSettings()
    require.NoError(t, err)

    assert.Equal(t, label, res.appInstanceLabelKey)
    assert.Equal(t, string(appv1.TrackingMethodLabel), trackingMethod)
    assert.Equal(t, "123456789", res.installationID)

    // By default the values won't be nil
    assert.NotNil(t, res.resourceOverrides)
    assert.NotNil(t, res.clusterSettings)
    assert.True(t, res.ignoreResourceUpdatesEnabled)
}

func Test_ownerRefGV(t *testing.T) {
    tests := []struct {
        name string
        input metav1.OwnerReference
        expected schema.GroupVersion
    }{
        {
            name: "valid API Version",
            input: metav1.OwnerReference{
                APIVersion: "apps/v1",
            },
            expected: schema.GroupVersion{
                Group: "apps",
                Version: "v1",
            },
        },
        {
            name: "custom defined version",
            input: metav1.OwnerReference{
                APIVersion: "custom-version",
            },
            expected: schema.GroupVersion{
                Version: "custom-version",
                Group: "",
            },
        },
        {
            name: "empty APIVersion",
            input: metav1.OwnerReference{
                APIVersion: "",
            },
            expected: schema.GroupVersion{},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            res := ownerRefGV(tt.input)
            assert.Equal(t, tt.expected, res)
        })
    }
}
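The table covers the three parsing outcomes: a full group/version string, a bare version-only string, and an empty value. One way to get exactly this behaviour is schema.ParseGroupVersion with any parse error collapsed into the zero GroupVersion; whether ownerRefGV is actually implemented this way is an assumption on my part.

// ownerRefGVSketch: "apps/v1" splits into {apps, v1}, a bare "custom-version"
// becomes a version-only GroupVersion, and an unparsable value falls back to zero.
func ownerRefGVSketch(ref metav1.OwnerReference) schema.GroupVersion {
    gv, err := schema.ParseGroupVersion(ref.APIVersion)
    if err != nil {
        return schema.GroupVersion{}
    }
    return gv
}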

controller/cache/info.go (vendored): 12 changes

@@ -446,6 +446,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
    }

    req, _ := resourcehelper.PodRequestsAndLimits(&pod)

    res.PodInfo = &PodInfo{NodeName: pod.Spec.NodeName, ResourceRequests: req, Phase: pod.Status.Phase}

    res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Node", Value: pod.Spec.NodeName})
@@ -454,6 +455,17 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
        res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Restart Count", Value: strconv.Itoa(restarts)})
    }

    // Requests are relevant even for pods in the init phase or pending state (e.g., due to insufficient resources),
    // as they help with diagnosing scheduling and startup issues.
    // requests will be released for terminated pods either with success or failed state termination.
    if !isPodPhaseTerminal(pod.Status.Phase) {
        CPUReq := req[corev1.ResourceCPU]
        MemoryReq := req[corev1.ResourceMemory]

        res.Info = append(res.Info, v1alpha1.InfoItem{Name: common.PodRequestsCPU, Value: strconv.FormatInt(CPUReq.MilliValue(), 10)})
        res.Info = append(res.Info, v1alpha1.InfoItem{Name: common.PodRequestsMEM, Value: strconv.FormatInt(MemoryReq.MilliValue(), 10)})
    }

    var urls []string
    if res.NetworkingInfo != nil {
        urls = res.NetworkingInfo.ExternalURLs
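The new block only advertises requests for pods that are not in a terminal phase, and it reports them as milli-values: cpu: 100m becomes the string "100" and memory: 128Mi becomes "134217728000" (134217728 bytes times 1000), which is exactly what the test expectations below assert. A small sketch of that conversion; the helper name is mine, only the MilliValue formatting is taken from the hunk above:

// requestInfoSketch shows the milli-value formatting used above: resource.Quantity
// values are reported via MilliValue, so 100m CPU -> "100" and 128Mi -> "134217728000".
func requestInfoSketch(req corev1.ResourceList) (cpu, mem string) {
    c := req[corev1.ResourceCPU]
    m := req[corev1.ResourceMemory]
    return strconv.FormatInt(c.MilliValue(), 10), strconv.FormatInt(m.MilliValue(), 10)
}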

controller/cache/info_test.go (vendored): 172 changes

@@ -12,6 +12,7 @@ import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "sigs.k8s.io/yaml"

    "github.com/argoproj/argo-cd/v3/common"
    "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
    "github.com/argoproj/argo-cd/v3/util/argo/normalizers"
    "github.com/argoproj/argo-cd/v3/util/errors"
@@ -304,6 +305,8 @@ func TestGetPodInfo(t *testing.T) {
    assert.Equal(t, []v1alpha1.InfoItem{
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/1"},
        {Name: common.PodRequestsCPU, Value: "0"}, // strings imported from common
        {Name: common.PodRequestsMEM, Value: "134217728000"},
    }, info.Info)
    assert.Equal(t, []string{"bar"}, info.Images)
    assert.Equal(t, &PodInfo{
@@ -365,9 +368,81 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Status Reason", Value: "Running"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "1/1"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

t.Run("TestGetPodWithInitialContainerInfoWithResources", func(t *testing.T) {
    pod := strToUnstructured(`
  apiVersion: "v1"
  kind: "Pod"
  metadata:
    labels:
      app: "app-with-initial-container"
    name: "app-with-initial-container-5f46976fdb-vd6rv"
    namespace: "default"
    ownerReferences:
    - apiVersion: "apps/v1"
      kind: "ReplicaSet"
      name: "app-with-initial-container-5f46976fdb"
  spec:
    containers:
    - image: "alpine:latest"
      imagePullPolicy: "Always"
      name: "app-with-initial-container"
      resources:
        requests:
          cpu: "100m"
          memory: "128Mi"
        limits:
          cpu: "500m"
          memory: "512Mi"
    initContainers:
    - image: "alpine:latest"
      imagePullPolicy: "Always"
      name: "app-with-initial-container-logshipper"
      resources:
        requests:
          cpu: "50m"
          memory: "64Mi"
        limits:
          cpu: "250m"
          memory: "256Mi"
    nodeName: "minikube"
  status:
    containerStatuses:
    - image: "alpine:latest"
      name: "app-with-initial-container"
      ready: true
      restartCount: 0
      started: true
      state:
        running:
          startedAt: "2024-10-08T08:44:25Z"
    initContainerStatuses:
    - image: "alpine:latest"
      name: "app-with-initial-container-logshipper"
      ready: true
      restartCount: 0
      started: false
      state:
        terminated:
          exitCode: 0
          reason: "Completed"
    phase: "Running"
`)

    info := &ResourceInfo{}
    populateNodeInfo(pod, info, []string{})
    assert.Equal(t, []v1alpha1.InfoItem{
        {Name: "Status Reason", Value: "Running"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "1/1"},
        {Name: common.PodRequestsCPU, Value: "100"},
        {Name: common.PodRequestsMEM, Value: "134217728000"},
    }, info.Info)
})
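The expected values here follow the usual Kubernetes effective-request rule that resourcehelper.PodRequestsAndLimits applies: per resource, take the larger of the sum of app-container requests and the largest init-container request. With app requests of 100m/128Mi and an init container at 50m/64Mi, the effective requests stay 100m and 128Mi, hence "100" and "134217728000" above. A tiny illustration of that rule; the helper name is illustrative and the snippet assumes k8s.io/apimachinery/pkg/api/resource:

// effectiveRequestSketch applies max(sum of app containers, max of init containers),
// the rule that yields 100m CPU / 128Mi memory for the pod in this subtest.
func effectiveRequestSketch(appContainers, initContainers []resource.Quantity) resource.Quantity {
    sum := resource.Quantity{}
    for _, q := range appContainers {
        sum.Add(q)
    }
    maxInit := resource.Quantity{}
    for _, q := range initContainers {
        if q.Cmp(maxInit) > 0 {
            maxInit = q
        }
    }
    if maxInit.Cmp(sum) > 0 {
        return maxInit
    }
    return sum
}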
t.Run("TestGetPodInfoWithSidecar", func(t *testing.T) {
    t.Parallel()

@@ -422,6 +497,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Status Reason", Value: "Running"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "2/2"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

@@ -480,6 +557,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Status Reason", Value: "Init:0/1"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/1"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

@@ -537,6 +616,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/3"},
        {Name: "Restart Count", Value: "3"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

@@ -594,6 +675,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/3"},
        {Name: "Restart Count", Value: "3"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

@@ -654,6 +737,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "1/3"},
        {Name: "Restart Count", Value: "7"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

@@ -696,6 +781,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/1"},
        {Name: "Restart Count", Value: "3"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

@@ -731,6 +818,45 @@ func TestGetPodInfo(t *testing.T) {
    }, info.Info)
})

// Test pod condition succeed which had some allocated resources
t.Run("TestPodConditionSucceededWithResources", func(t *testing.T) {
    t.Parallel()

    pod := strToUnstructured(`
  apiVersion: v1
  kind: Pod
  metadata:
    name: test8
  spec:
    nodeName: minikube
    containers:
    - name: container
      resources:
        requests:
          cpu: "50m"
          memory: "64Mi"
        limits:
          cpu: "250m"
          memory: "256Mi"
  status:
    phase: Succeeded
    containerStatuses:
    - ready: false
      restartCount: 0
      state:
        terminated:
          reason: Completed
          exitCode: 0
`)
    info := &ResourceInfo{}
    populateNodeInfo(pod, info, []string{})
    assert.Equal(t, []v1alpha1.InfoItem{
        {Name: "Status Reason", Value: "Completed"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/1"},
    }, info.Info)
})

// Test pod condition failed
t.Run("TestPodConditionFailed", func(t *testing.T) {
    t.Parallel()
@@ -763,6 +889,46 @@ func TestGetPodInfo(t *testing.T) {
    }, info.Info)
})

// Test pod condition failed with allocated resources

t.Run("TestPodConditionFailedWithResources", func(t *testing.T) {
    t.Parallel()

    pod := strToUnstructured(`
  apiVersion: v1
  kind: Pod
  metadata:
    name: test9
  spec:
    nodeName: minikube
    containers:
    - name: container
      resources:
        requests:
          cpu: "50m"
          memory: "64Mi"
        limits:
          cpu: "250m"
          memory: "256Mi"
  status:
    phase: Failed
    containerStatuses:
    - ready: false
      restartCount: 0
      state:
        terminated:
          reason: Error
          exitCode: 1
`)
    info := &ResourceInfo{}
    populateNodeInfo(pod, info, []string{})
    assert.Equal(t, []v1alpha1.InfoItem{
        {Name: "Status Reason", Value: "Error"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/1"},
    }, info.Info)
})

// Test pod condition succeed with deletion
t.Run("TestPodConditionSucceededWithDeletion", func(t *testing.T) {
    t.Parallel()
@@ -824,6 +990,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Status Reason", Value: "Terminating"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/1"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

@@ -850,6 +1018,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Status Reason", Value: "Terminating"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/1"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})

@@ -880,6 +1050,8 @@ func TestGetPodInfo(t *testing.T) {
        {Name: "Status Reason", Value: "SchedulingGated"},
        {Name: "Node", Value: "minikube"},
        {Name: "Containers", Value: "0/2"},
        {Name: common.PodRequestsCPU, Value: "0"},
        {Name: common.PodRequestsMEM, Value: "0"},
    }, info.Info)
})
}