Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-20 17:48:47 +01:00

Compare commits: crenshaw-d ... master (552 commits)
85 .github/ISSUE_TEMPLATE/release.md vendored

@@ -9,18 +9,79 @@ assignees: ''
Target RC1 date: ___. __, ____
Target GA date: ___. __, ____

- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can’t merge
## RC1 Release Checklist

- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can't merge
- [ ] At least two days before RC1 date, draft RC blog post and submit it for review (or delegate this task)
- [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing)
- [ ] Create new release branch
- [ ] Create new release branch (or delegate this task to an Approver)
- [ ] Add the release branch to ReadTheDocs
- [ ] Confirm that tweet and blog post are ready
- [ ] Trigger the release
- [ ] After the release is finished, publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements with lots of emojis announcing the release and requesting help testing
- [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate (or delegate this task to an Approver and coordinate timing)
- [ ] At release date, evaluate if any bugs justify delaying the release. If not, cut the release (or delegate this task to an Approver and coordinate timing)
- [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing)
- [ ] After the release, post in #argo-cd that the {current minor version minus 3} has reached EOL (example: https://cloud-native.slack.com/archives/C01TSERG0KZ/p1667336234059729)
- [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing)
- [ ] Run the [Init ArgoCD Release workflow](https://github.com/argoproj/argo-cd/actions/workflows/init-release.yaml) from the release branch
- [ ] Review and merge the generated version bump PR
- [ ] Run `./hack/trigger-release.sh` to push the release tag
- [ ] Monitor the [Publish ArgoCD Release workflow](https://github.com/argoproj/argo-cd/actions/workflows/release.yaml)
- [ ] Verify the release on [GitHub releases](https://github.com/argoproj/argo-cd/releases)
- [ ] Verify the container image on [Quay.io](https://quay.io/repository/argoproj/argocd?tab=tags)
- [ ] Confirm the new version appears in [Read the Docs](https://argo-cd.readthedocs.io/)
- [ ] Verify the docs release build in https://app.readthedocs.org/projects/argo-cd/ succeeded and retry if failed (requires an Approver with admin creds to readthedocs)
- [ ] Announce RC1 release
- [ ] Confirm that tweet and blog post are ready
- [ ] Publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements requesting help testing:
```
:mega: Argo CD v{MAJOR}.{MINOR}.{PATCH}-rc{RC_NUMBER} is OUT NOW! :argocd::tada:

Please go through the following resources to know more about the release:

Release notes: https://github.com/argoproj/argo-cd/releases/tag/v{VERSION}
Blog: {BLOG_POST_URL}

We'd love your help testing this release candidate! Please try it out in your environments and report any issues you find. This helps us ensure a stable GA release.

Thanks to all the folks who spent their time contributing to this release in any way possible!
```
- [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate during the RC period (or delegate this task to an Approver and coordinate timing)
- [ ] After creating the RC, open a documentation PR for the next minor version using [this](../../docs/operator-manual/templates/minor_version_upgrade.md) template.

## GA Release Checklist

- [ ] At GA release date, evaluate if any bugs justify delaying the release
- [ ] Prepare for EOL version (version that is 3 releases old)
- [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing)
- [ ] Edit the final patch release on GitHub and add the following notice at the top:
```markdown
> [!IMPORTANT]
> **END OF LIFE NOTICE**
>
> This is the final release of the {EOL_SERIES} release series. As of {GA_DATE}, this version has reached end of life and will no longer receive bug fixes or security updates.
>
> **Action Required**: Please upgrade to a [supported version](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) (v{SUPPORTED_VERSION_1}, v{SUPPORTED_VERSION_2}, or v{NEW_VERSION}).
```
- [ ] Cut GA release (or delegate this task to an Approver and coordinate timing)
- [ ] Run the [Init ArgoCD Release workflow](https://github.com/argoproj/argo-cd/actions/workflows/init-release.yaml) from the release branch
- [ ] Review and merge the generated version bump PR
- [ ] Run `./hack/trigger-release.sh` to push the release tag
- [ ] Monitor the [Publish ArgoCD Release workflow](https://github.com/argoproj/argo-cd/actions/workflows/release.yaml)
- [ ] Verify the release on [GitHub releases](https://github.com/argoproj/argo-cd/releases)
- [ ] Verify the container image on [Quay.io](https://quay.io/repository/argoproj/argocd?tab=tags)
- [ ] Verify the `stable` tag has been updated
- [ ] Confirm the new version appears in [Read the Docs](https://argo-cd.readthedocs.io/)
- [ ] Verify the docs release build in https://app.readthedocs.org/projects/argo-cd/ succeeded and retry if failed (requires an Approver with admin creds to readthedocs)
- [ ] Announce GA release with EOL notice
- [ ] Confirm that tweet and blog post are ready
- [ ] Publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements announcing the release and EOL:
```
:mega: Argo CD v{MAJOR}.{MINOR} is OUT NOW! :argocd::tada:

Please go through the following resources to know more about the release:

Upgrade instructions: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/{PREV_MINOR}-{MAJOR}.{MINOR}/
Blog: {BLOG_POST_URL}

:warning: IMPORTANT: With the release of Argo CD v{MAJOR}.{MINOR}, support for Argo CD v{EOL_VERSION} has officially reached End of Life (EOL).

Thanks to all the folks who spent their time contributing to this release in any way possible!
```
- [ ] (For the next release champion) Review the [items scheduled for the next release](https://github.com/orgs/argoproj/projects/25). If any item does not have an assignee who can commit to finish the feature, move it to the next release.
- [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again.
- [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again.
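
The checklist's "{current minor version minus 3}" bookkeeping is plain version arithmetic. A minimal bash sketch for illustration only; the version value below is an assumed example, not taken from the template:

```bash
# Illustrative only: derive the series that reaches EOL when a new minor goes GA,
# i.e. "current minor version minus 3" as used in the checklist above.
NEW_VERSION="3.3"                      # assumed example GA version
MAJOR="${NEW_VERSION%%.*}"
MINOR="${NEW_VERSION##*.}"
EOL_SERIES="${MAJOR}.$((MINOR - 3))"
echo "v${NEW_VERSION} GA -> v${EOL_SERIES} reaches end of life"   # prints: v3.3 GA -> v3.0 reaches end of life
```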
1 .github/configs/renovate-config.js vendored

@@ -4,6 +4,7 @@ module.exports = {
autodiscover: false,
allowPostUpgradeCommandTemplating: true,
allowedPostUpgradeCommands: ["make mockgen"],
binarySource: 'install',
extends: [
"github>argoproj/argo-cd//renovate-presets/commons.json5",
"github>argoproj/argo-cd//renovate-presets/custom-managers/shell.json5",
26 .github/pr-title-checker-config.json vendored

@@ -1,15 +1,15 @@
{
"LABEL": {
"name": "title needs formatting",
"color": "EEEEEE"
},
"CHECKS": {
"prefixes": ["[Bot] docs: "],
"regexp": "^(feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
},
"MESSAGES": {
"success": "PR title is valid",
"failure": "PR title is invalid",
"notice": "PR Title needs to pass regex '^(feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
}
"LABEL": {
"name": "title needs formatting",
"color": "EEEEEE"
},
"CHECKS": {
"prefixes": ["[Bot] docs: "],
"regexp": "^(refactor|feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
},
"MESSAGES": {
"success": "PR title is valid",
"failure": "PR title is invalid",
"notice": "PR Title needs to pass regex '^(refactor|feat|fix|docs|test|ci|chore)!?(\\(.*\\))?!?:.*"
}
}
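
The only functional change in this file is adding `refactor` to the allowed title prefixes. A standalone sketch of what the updated pattern accepts, using grep -E with the JSON double-escaping unwrapped (the sample titles are illustrative assumptions):

```bash
# Titles must start with an allowed type, optionally "!" and/or "(scope)", then ":".
regex='^(refactor|feat|fix|docs|test|ci|chore)!?(\(.*\))?!?:.*'
for title in \
  'refactor(server): simplify session manager' \
  'feat!: drop legacy API' \
  'Refactor session manager'; do
  if printf '%s\n' "$title" | grep -Eq "$regex"; then
    echo "valid:   $title"
  else
    echo "invalid: $title"   # wrong/missing type prefix or missing colon
  fi
done
```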
1 .github/workflows/README.md vendored

@@ -11,6 +11,7 @@
| release.yaml | Build images, cli-binaries, provenances, and post actions |
| scorecard.yaml | Generate scorecard for supply-chain security |
| update-snyk.yaml | Scheduled snyk reports |
| stale.yaml | Labels stale issues and PRs |

# Reusable workflows
8 .github/workflows/bump-major-version.yaml vendored

@@ -10,10 +10,10 @@ jobs:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
name: Automatically update major version
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -37,7 +37,7 @@ jobs:
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd

- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Add ~/go/bin to PATH
@@ -74,7 +74,7 @@ jobs:
rsync -a --exclude=.git /home/runner/go/src/github.com/argoproj/argo-cd/ ../argo-cd

- name: Create pull request
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
with:
commit-message: "Bump major version to ${{ steps.get-target-version.outputs.TARGET_VERSION }}"
title: "Bump major version to ${{ steps.get-target-version.outputs.TARGET_VERSION }}"
4 .github/workflows/cherry-pick-single.yml vendored

@@ -28,7 +28,7 @@ on:
jobs:
cherry-pick:
name: Cherry Pick to ${{ inputs.version_number }}
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
steps:
- name: Generate a token
id: generate-token
@@ -38,7 +38,7 @@ jobs:
private-key: ${{ secrets.CHERRYPICK_APP_PRIVATE_KEY }}

- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
token: ${{ steps.generate-token.outputs.token }}
2 .github/workflows/cherry-pick.yml vendored

@@ -14,7 +14,7 @@ jobs:
(github.event.action == 'labeled' && startsWith(github.event.label.name, 'cherry-pick/')) ||
(github.event.action == 'closed' && contains(toJSON(github.event.pull_request.labels.*.name), 'cherry-pick/'))
)
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
outputs:
labels: ${{ steps.extract-labels.outputs.labels }}
steps:
155 .github/workflows/ci-build.yaml vendored

@@ -14,7 +14,7 @@ on:
env:
# Golang version to use across CI steps
# renovate: datasource=golang-version packageName=golang
GOLANG_VERSION: '1.25.3'
GOLANG_VERSION: '1.26.0'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,14 +25,14 @@ permissions:

jobs:
changes:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
outputs:
backend: ${{ steps.filter.outputs.backend_any_changed }}
frontend: ${{ steps.filter.outputs.frontend_any_changed }}
docs: ${{ steps.filter.outputs.docs_any_changed }}
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: tj-actions/changed-files@7dee1b0c1557f278e5c7dc244927139d78c0e22a # v47.0.4
id: filter
with:
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
@@ -50,14 +50,14 @@ jobs:
check-go:
name: Ensure Go modules synchronicity
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -70,18 +70,18 @@ jobs:
build-go:
name: Build & cache Go code
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -97,27 +97,27 @@ jobs:
pull-requests: read # for golangci/golangci-lint-action to fetch pull requests
name: Lint Go code
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
# renovate: datasource=go packageName=github.com/golangci/golangci-lint versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
version: v2.5.0
# renovate: datasource=go packageName=github.com/golangci/golangci-lint/v2 versioning=regex:^v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+)?$
version: v2.9.0
args: --verbose

test-go:
name: Run unit tests for Go packages
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- build-go
- changes
@@ -128,11 +128,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -152,7 +152,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -173,7 +173,7 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate test results artifacts
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: test-results
path: test-results
@@ -181,7 +181,7 @@ jobs:
test-go-race:
name: Run unit tests with -race for Go packages
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- build-go
- changes
@@ -192,11 +192,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -216,7 +216,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -237,7 +237,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: race-results
path: test-results/
@@ -245,20 +245,21 @@ jobs:
codegen:
name: Check changes to generated code
if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.docs == 'true'}}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
# generalizing repo name for forks: ${{ github.event.repository.name }}
run: |
mkdir -p ~/go/src/github.com/argoproj
cp -a ../argo-cd ~/go/src/github.com/argoproj
cp -a ../${{ github.event.repository.name }} ~/go/src/github.com/argoproj
- name: Add ~/go/bin to PATH
run: |
echo "/home/runner/go/bin" >> $GITHUB_PATH
@@ -270,12 +271,14 @@ jobs:
# We need to vendor go modules for codegen yet
go mod download
go mod vendor -v
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
- name: Install toolchain for codegen
run: |
make install-codegen-tools-local
make install-go-tools-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
@@ -286,31 +289,33 @@ jobs:
export GOPATH=$(go env GOPATH)
git checkout -- go.mod go.sum
make codegen-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}
- name: Check nothing has changed
run: |
set -xo pipefail
git diff --exit-code -- . ':!go.sum' ':!go.mod' ':!assets/swagger.json' | tee codegen.patch
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# generalizing repo name for forks: ${{ github.event.repository.name }}
working-directory: /home/runner/go/src/github.com/argoproj/${{ github.event.repository.name }}

build-ui:
name: Build, test & lint UI code
# We run UI logic for backend changes so that we have a complete set of coverage documents to send to codecov.
if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.frontend == 'true' }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- changes
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup NodeJS
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
with:
# renovate: datasource=node-version packageName=node versioning=node
node-version: '22.9.0'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -333,9 +338,9 @@ jobs:
working-directory: ui/

shellcheck:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- run: |
sudo apt-get install shellcheck
shellcheck -e SC2059 -e SC2154 -e SC2034 -e SC2016 -e SC1091 $(find . -type f -name '*.sh' | grep -v './ui/node_modules') | tee sc.log
@@ -344,7 +349,7 @@ jobs:
analyze:
name: Process & analyze test artifacts
if: ${{ needs.changes.outputs.backend == 'true' || needs.changes.outputs.frontend == 'true' }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
needs:
- test-go
- build-ui
@@ -352,14 +357,15 @@ jobs:
- test-e2e
env:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
codecov_secret: ${{ secrets.CODECOV_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -367,12 +373,12 @@ jobs:
run: |
rm -rf ui/node_modules/argo-ui/node_modules
- name: Get e2e code coverage
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
name: e2e-code-coverage
path: e2e-code-coverage
- name: Get unit test code coverage
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
name: test-results
path: test-results
@@ -384,52 +390,52 @@ jobs:
run: |
go tool covdata percent -i=test-results,e2e-code-coverage/applicationset-controller,e2e-code-coverage/repo-server,e2e-code-coverage/app-controller,e2e-code-coverage/commit-server -o test-results/full-coverage.out
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
# Only run when the workflow is for upstream (PR target or push is in argoproj/argo-cd).
if: github.repository == 'argoproj/argo-cd'
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
with:
files: test-results/full-coverage.out
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Upload test results to Codecov
if: github.ref == 'refs/heads/master' && github.event_name == 'push' && github.repository == 'argoproj/argo-cd'
uses: codecov/test-results-action@47f89e9acb64b76debcd5ea40642d25a4adced9f # v1.1.1
# Codecov uploads test results to Codecov.io on upstream master branch.
if: github.repository == 'argoproj/argo-cd' && github.ref == 'refs/heads/master' && github.event_name == 'push'
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
with:
file: test-results/junit.xml
files: test-results/junit.xml
fail_ci_if_error: true
token: ${{ secrets.CODECOV_TOKEN }}
report_type: test_results
- name: Perform static code analysis using SonarCloud
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
uses: SonarSource/sonarqube-scan-action@1a6d90ebcb0e6a6b1d87e37ba693fe453195ae25 # v5.3.1
uses: SonarSource/sonarqube-scan-action@a31c9398be7ace6bbfaf30c0bd5d415f843d45e9 # v7.0.0
if: env.sonar_secret != ''
test-e2e:
name: Run end-to-end tests
if: ${{ needs.changes.outputs.backend == 'true' }}
runs-on: oracle-vm-16cpu-64gb-x86-64
runs-on: ${{ github.repository == 'argoproj/argo-cd' && 'oracle-vm-16cpu-64gb-x86-64' || 'ubuntu-24.04' }}
strategy:
fail-fast: false
matrix:
# latest: true means that this version mush upload the coverage report to codecov.io
# We designate the latest version because we only collect code coverage for that version.
k3s:
- version: v1.33.1
- version: v1.35.0
latest: true
- version: v1.34.2
latest: false
- version: v1.33.1
latest: false
- version: v1.32.1
latest: false
- version: v1.31.0
latest: false
- version: v1.30.4
latest: false
needs:
- build-go
- changes
env:
GOPATH: /home/ubuntu/go
ARGOCD_FAKE_IN_CLUSTER: 'true'
ARGOCD_SSH_DATA_PATH: '/tmp/argo-e2e/app/config/ssh'
ARGOCD_TLS_DATA_PATH: '/tmp/argo-e2e/app/config/tls'
ARGOCD_E2E_SSH_KNOWN_HOSTS: '../fixture/certs/ssh_known_hosts'
ARGOCD_E2E_K3S: 'true'
ARGOCD_IN_CI: 'true'
ARGOCD_E2E_APISERVER_PORT: '8088'
@@ -446,11 +452,14 @@ jobs:
swap-storage: false
tool-cache: false
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Set GOPATH
run: |
echo "GOPATH=$HOME/go" >> $GITHUB_ENV
- name: GH actions workaround - Kill XSP4 process
run: |
sudo pkill mono || true
@@ -461,19 +470,19 @@ jobs:
set -x
curl -sfL https://get.k3s.io | sh -
sudo chmod -R a+rw /etc/rancher/k3s
sudo mkdir -p $HOME/.kube && sudo chown -R ubuntu $HOME/.kube
sudo mkdir -p $HOME/.kube && sudo chown -R $(whoami) $HOME/.kube
sudo k3s kubectl config view --raw > $HOME/.kube/config
sudo chown ubuntu $HOME/.kube/config
sudo chown $(whoami) $HOME/.kube/config
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Add ~/go/bin to PATH
run: |
echo "/home/ubuntu/go/bin" >> $GITHUB_PATH
echo "$HOME/go/bin" >> $GITHUB_PATH
- name: Add /usr/local/bin to PATH
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
@@ -493,13 +502,13 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.43.0
docker pull ghcr.io/dexidp/dex:v2.44.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:8.2.1-alpine
docker pull redis:8.2.3-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
chown ubuntu dist
chown $(whoami) dist
- name: Run E2E server and wait for it being available
timeout-minutes: 30
run: |
@@ -525,13 +534,13 @@ jobs:
goreman run stop-all || echo "goreman trouble"
sleep 30
- name: Upload e2e coverage report
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: e2e-code-coverage
path: /tmp/coverage
if: ${{ matrix.k3s.latest }}
- name: Upload e2e-server logs
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: e2e-server-k8s${{ matrix.k3s.version }}.log
path: /tmp/e2e-server.log
@@ -549,7 +558,7 @@ jobs:
needs:
- test-e2e
- changes
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
- run: |
result="${{ needs.test-e2e.result }}"
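
The analyze job above merges unit-test and e2e binary coverage directories with `go tool covdata`. For local inspection, a sketch along the same lines (Go 1.20+; the directory names mirror the artifacts used in the workflow and are assumptions about what you have downloaded locally):

```bash
# Convert the binary coverage data into a text profile and print an overall summary.
go tool covdata textfmt \
  -i=test-results,e2e-code-coverage/app-controller,e2e-code-coverage/repo-server \
  -o full-coverage.out
go tool cover -func=full-coverage.out | tail -n 1   # overall statement coverage
```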
6 .github/workflows/codeql.yml vendored

@@ -26,14 +26,14 @@ jobs:
if: github.repository == 'argoproj/argo-cd' || vars.enable_codeql

# CodeQL runs on ubuntu-latest and windows-latest
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

# Use correct go version. https://github.com/github/codeql-action/issues/1842#issuecomment-1704398087
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: go.mod
20 .github/workflows/image-reuse.yaml vendored

@@ -51,23 +51,23 @@ jobs:
contents: read
packages: write # Used to push images to `ghcr.io` if used.
id-token: write # Needed to create an OIDC token for keyless signing
runs-on: ubuntu-22.04
outputs:
runs-on: ubuntu-24.04
outputs:
image-digest: ${{ steps.image.outputs.digest }}
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
if: ${{ github.ref_type == 'tag'}}

- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
if: ${{ github.ref_type != 'tag'}}

- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: ${{ inputs.go-version }}
cache: false
@@ -76,7 +76,7 @@ jobs:
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0

- uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0

- name: Setup tags for container image as a CSV type
run: |
@@ -103,7 +103,7 @@ jobs:
echo 'EOF' >> $GITHUB_ENV

- name: Login to Quay.io
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
with:
registry: quay.io
username: ${{ secrets.quay_username }}
@@ -111,7 +111,7 @@ jobs:
if: ${{ inputs.quay_image_name && inputs.push }}

- name: Login to GitHub Container Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
with:
registry: ghcr.io
username: ${{ secrets.ghcr_username }}
@@ -119,7 +119,7 @@ jobs:
if: ${{ inputs.ghcr_image_name && inputs.push }}

- name: Login to dockerhub Container Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
with:
username: ${{ secrets.docker_username }}
password: ${{ secrets.docker_password }}
@@ -142,7 +142,7 @@ jobs:

- name: Build and push container image
id: image
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 #v6.18.0
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 #v6.19.2
with:
context: .
platforms: ${{ inputs.platforms }}
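
Since the job keeps `id-token: write` for keyless signing and installs cosign, a published image can be checked after the fact. A hedged sketch using cosign v2 syntax; the certificate identity regexp is an assumption about which workflow performed the signing and should be adjusted to match the actual signer:

```bash
# Verify a keyless (GitHub OIDC) signature on a published Argo CD image.
cosign verify \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  --certificate-identity-regexp 'https://github.com/argoproj/argo-cd/\.github/workflows/.*' \
  quay.io/argoproj/argocd:latest
```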
64 .github/workflows/image.yaml vendored

@@ -19,16 +19,49 @@ jobs:
set-vars:
permissions:
contents: read
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
# Always run to calculate variables - other jobs check outputs
runs-on: ubuntu-24.04
outputs:
image-tag: ${{ steps.image.outputs.tag}}
platforms: ${{ steps.platforms.outputs.platforms }}
image_namespace: ${{ steps.image.outputs.image_namespace }}
image_repository: ${{ steps.image.outputs.image_repository }}
quay_image_name: ${{ steps.image.outputs.quay_image_name }}
ghcr_image_name: ${{ steps.image.outputs.ghcr_image_name }}
ghcr_provenance_image: ${{ steps.image.outputs.ghcr_provenance_image }}
allow_ghcr_publish: ${{ steps.image.outputs.allow_ghcr_publish }}
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

- name: Set image tag for ghcr
run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
- name: Set image tag and names
run: |
# Calculate image tag
TAG="$(cat ./VERSION)-${GITHUB_SHA::8}"
echo "tag=$TAG" >> $GITHUB_OUTPUT

# Calculate image names with defaults
IMAGE_NAMESPACE="${{ vars.IMAGE_NAMESPACE || 'argoproj' }}"
IMAGE_REPOSITORY="${{ vars.IMAGE_REPOSITORY || 'argocd' }}"
GHCR_NAMESPACE="${{ vars.GHCR_NAMESPACE || github.repository }}"
GHCR_REPOSITORY="${{ vars.GHCR_REPOSITORY || 'argocd' }}"

echo "image_namespace=$IMAGE_NAMESPACE" >> $GITHUB_OUTPUT
echo "image_repository=$IMAGE_REPOSITORY" >> $GITHUB_OUTPUT

# Construct image name
echo "quay_image_name=quay.io/$IMAGE_NAMESPACE/$IMAGE_REPOSITORY:latest" >> $GITHUB_OUTPUT

ALLOW_GHCR_PUBLISH=false
if [[ "${{ github.repository }}" == "argoproj/argo-cd" || "$GHCR_NAMESPACE" != argoproj/* ]]; then
ALLOW_GHCR_PUBLISH=true
echo "ghcr_image_name=ghcr.io/$GHCR_NAMESPACE/$GHCR_REPOSITORY:$TAG" >> $GITHUB_OUTPUT
echo "ghcr_provenance_image=ghcr.io/$GHCR_NAMESPACE/$GHCR_REPOSITORY" >> $GITHUB_OUTPUT
else
echo "GhCR publish skipped: refusing to push to namespace '$GHCR_NAMESPACE'. Please override GHCR_* for forks." >&2
echo "ghcr_image_name=" >> $GITHUB_OUTPUT
echo "ghcr_provenance_image=" >> $GITHUB_OUTPUT
fi
echo "allow_ghcr_publish=$ALLOW_GHCR_PUBLISH" >> $GITHUB_OUTPUT
id: image

- name: Determine image platforms to use
@@ -48,12 +81,12 @@ jobs:
contents: read
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
id-token: write # for creating OIDC tokens for signing.
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name != 'push' }}
if: ${{ (github.repository == 'argoproj/argo-cd' || needs.set-vars.outputs.image_namespace != 'argoproj') && github.event_name != 'push' }}
uses: ./.github/workflows/image-reuse.yaml
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.3
go-version: 1.26.0
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false

@@ -63,14 +96,14 @@ jobs:
contents: read
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
id-token: write # for creating OIDC tokens for signing.
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
if: ${{ (github.repository == 'argoproj/argo-cd' || needs.set-vars.outputs.image_namespace != 'argoproj') && github.event_name == 'push' }}
uses: ./.github/workflows/image-reuse.yaml
with:
quay_image_name: quay.io/argoproj/argocd:latest
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
quay_image_name: ${{ needs.set-vars.outputs.quay_image_name }}
ghcr_image_name: ${{ needs.set-vars.outputs.ghcr_image_name }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.3
go-version: 1.26.0
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:
@@ -81,16 +114,17 @@ jobs:

build-and-publish-provenance: # Push attestations to GHCR, latest image is polluting quay.io
needs:
- set-vars
- build-and-publish
permissions:
actions: read # for detecting the Github Actions environment.
id-token: write # for creating OIDC tokens for signing.
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
if: ${{ (github.repository == 'argoproj/argo-cd' || needs.set-vars.outputs.image_namespace != 'argoproj') && github.event_name == 'push' && needs.set-vars.outputs.allow_ghcr_publish == 'true'}}
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
with:
image: ghcr.io/argoproj/argo-cd/argocd
image: ${{ needs.set-vars.outputs.ghcr_provenance_image }}
digest: ${{ needs.build-and-publish.outputs.image-digest }}
registry-username: ${{ github.actor }}
secrets:
@@ -104,9 +138,9 @@ jobs:
contents: write # for git to push upgrade commit if not already deployed
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
env:
TOKEN: ${{ secrets.TOKEN }}
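
The new `set-vars` logic only refuses GHCR publishing when a non-upstream repository would push into the `argoproj/*` namespace. A standalone sketch of how that guard evaluates; the repository and namespace values below are illustrative:

```bash
# Mirrors the guard added to image.yaml: publish unless a fork targets argoproj/*.
check() {
  local repo="$1" ghcr_namespace="$2"
  if [[ "$repo" == "argoproj/argo-cd" || "$ghcr_namespace" != argoproj/* ]]; then
    echo "$repo -> publish to ghcr.io/$ghcr_namespace"
  else
    echo "$repo -> skip GHCR publish (refusing namespace '$ghcr_namespace')"
  fi
}
check "argoproj/argo-cd" "argoproj/argo-cd"   # upstream: publishes
check "someuser/argo-cd" "someuser/argo-cd"   # fork, default GHCR_NAMESPACE=github.repository: publishes to its own namespace
check "someuser/argo-cd" "argoproj/argo-cd"   # fork pointing at the upstream namespace: skipped
```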
12 .github/workflows/init-release.yaml vendored

@@ -20,10 +20,16 @@ jobs:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
name: Automatically generate version and manifests on ${{ inputs.TARGET_BRANCH }}
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
env:
# Calculate image names with defaults, this will be used in the make manifests-local command
# to generate the correct image name in the manifests
IMAGE_REGISTRY: ${{ vars.IMAGE_REGISTRY || 'quay.io' }}
IMAGE_NAMESPACE: ${{ vars.IMAGE_NAMESPACE || 'argoproj' }}
IMAGE_REPOSITORY: ${{ vars.IMAGE_REPOSITORY || 'argocd' }}
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -64,7 +70,7 @@ jobs:
git stash pop

- name: Create pull request
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
with:
commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
2 .github/workflows/pr-title-check.yml vendored

@@ -21,7 +21,7 @@ jobs:
contents: read
pull-requests: read
name: Validate PR Title
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
steps:
- uses: thehanimo/pr-title-checker@7fbfe05602bdd86f926d3fb3bccb6f3aed43bc70 # v1.4.3
with:
84  .github/workflows/release.yaml (vendored)
@@ -11,21 +11,22 @@ permissions: {}
|
||||
|
||||
env:
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
GOLANG_VERSION: '1.25.3' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
GOLANG_VERSION: '1.26.0' # Note: go-version must also be set in job argocd-image.with.go-version
|
||||
|
||||
jobs:
|
||||
argocd-image:
|
||||
needs: [setup-variables]
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # used to push images to `ghcr.io` if used.
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
uses: ./.github/workflows/image-reuse.yaml
|
||||
with:
|
||||
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
quay_image_name: ${{ needs.setup-variables.outputs.quay_image_name }}
|
||||
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
|
||||
# renovate: datasource=golang-version packageName=golang
|
||||
go-version: 1.25.3
|
||||
go-version: 1.26.0
|
||||
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
|
||||
push: true
|
||||
secrets:
|
||||
@@ -34,14 +35,20 @@ jobs:
|
||||
|
||||
setup-variables:
|
||||
name: Setup Release Variables
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
if: github.repository == 'argoproj/argo-cd' || (github.repository_owner != 'argoproj' && vars.ENABLE_FORK_RELEASES == 'true' && vars.IMAGE_NAMESPACE && vars.IMAGE_NAMESPACE != 'argoproj')
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
is_pre_release: ${{ steps.var.outputs.is_pre_release }}
|
||||
is_latest_release: ${{ steps.var.outputs.is_latest_release }}
|
||||
enable_fork_releases: ${{ steps.var.outputs.enable_fork_releases }}
|
||||
image_namespace: ${{ steps.var.outputs.image_namespace }}
|
||||
image_repository: ${{ steps.var.outputs.image_repository }}
|
||||
quay_image_name: ${{ steps.var.outputs.quay_image_name }}
|
||||
provenance_image: ${{ steps.var.outputs.provenance_image }}
|
||||
allow_fork_release: ${{ steps.var.outputs.allow_fork_release }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -67,18 +74,36 @@ jobs:
|
||||
fi
|
||||
echo "is_pre_release=$PRE_RELEASE" >> $GITHUB_OUTPUT
|
||||
echo "is_latest_release=$IS_LATEST" >> $GITHUB_OUTPUT
|
||||
|
||||
# Calculate configuration with defaults
|
||||
ENABLE_FORK_RELEASES="${{ vars.ENABLE_FORK_RELEASES || 'false' }}"
|
||||
IMAGE_NAMESPACE="${{ vars.IMAGE_NAMESPACE || 'argoproj' }}"
|
||||
IMAGE_REPOSITORY="${{ vars.IMAGE_REPOSITORY || 'argocd' }}"
|
||||
|
||||
echo "enable_fork_releases=$ENABLE_FORK_RELEASES" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "image_namespace=$IMAGE_NAMESPACE" >> $GITHUB_OUTPUT
|
||||
echo "image_repository=$IMAGE_REPOSITORY" >> $GITHUB_OUTPUT
|
||||
echo "quay_image_name=quay.io/$IMAGE_NAMESPACE/$IMAGE_REPOSITORY:${{ github.ref_name }}" >> $GITHUB_OUTPUT
|
||||
echo "provenance_image=quay.io/$IMAGE_NAMESPACE/$IMAGE_REPOSITORY" >> $GITHUB_OUTPUT
|
||||
|
||||
ALLOW_FORK_RELEASE=false
|
||||
if [[ "${{ github.repository_owner }}" != "argoproj" && "$ENABLE_FORK_RELEASES" == "true" && "$IMAGE_NAMESPACE" != "argoproj" && "${{ github.ref }}" == refs/tags/* ]]; then
|
||||
ALLOW_FORK_RELEASE=true
|
||||
fi
|
||||
echo "allow_fork_release=$ALLOW_FORK_RELEASE" >> $GITHUB_OUTPUT
|
||||
|
||||
argocd-image-provenance:
|
||||
needs: [argocd-image]
|
||||
needs: [setup-variables, argocd-image]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment.
|
||||
id-token: write # for creating OIDC tokens for signing.
|
||||
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
|
||||
with:
|
||||
image: quay.io/argoproj/argocd
|
||||
image: ${{ needs.setup-variables.outputs.provenance_image }}
|
||||
digest: ${{ needs.argocd-image.outputs.image-digest }}
|
||||
secrets:
|
||||
registry-username: ${{ secrets.RELEASE_QUAY_USERNAME }}
|
||||
@@ -91,15 +116,15 @@ jobs:
|
||||
- argocd-image-provenance
|
||||
permissions:
|
||||
contents: write # used for uploading assets
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
GORELEASER_MAKE_LATEST: ${{ needs.setup-variables.outputs.is_latest_release }}
|
||||
outputs:
|
||||
hashes: ${{ steps.hash.outputs.hashes }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -108,7 +133,7 @@ jobs:
|
||||
run: git fetch --force --tags
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
cache: false
|
||||
@@ -143,6 +168,8 @@ jobs:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
KUBECTL_VERSION: ${{ env.KUBECTL_VERSION }}
|
||||
GIT_TREE_STATE: ${{ env.GIT_TREE_STATE }}
|
||||
# Used to determine the current repository in the goreleaser config to display correct manifest links
|
||||
GORELEASER_CURRENT_REPOSITORY: ${{ github.repository }}
|
||||
|
||||
- name: Generate subject for provenance
|
||||
id: hash
|
||||
@@ -159,12 +186,12 @@ jobs:
|
||||
echo "hashes=$hashes" >> $GITHUB_OUTPUT
|
||||
|
||||
goreleaser-provenance:
|
||||
needs: [goreleaser]
|
||||
needs: [goreleaser, setup-variables]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment
|
||||
id-token: write # Needed for provenance signing and ID
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
|
||||
with:
|
||||
@@ -177,21 +204,22 @@ jobs:
|
||||
needs:
|
||||
- argocd-image
|
||||
- goreleaser
|
||||
- setup-variables
|
||||
permissions:
|
||||
contents: write # Needed for release uploads
|
||||
outputs:
|
||||
hashes: ${{ steps.sbom-hash.outputs.hashes }}
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Golang
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
cache: false
|
||||
@@ -207,7 +235,7 @@ jobs:
|
||||
# managers (gomod, yarn, npm).
|
||||
PROJECT_FOLDERS: '.,./ui'
|
||||
# full qualified name of the docker image to be inspected
|
||||
DOCKER_IMAGE: quay.io/argoproj/argocd:${{ github.ref_name }}
|
||||
DOCKER_IMAGE: ${{ needs.setup-variables.outputs.quay_image_name }}
|
||||
run: |
|
||||
yarn install --cwd ./ui
|
||||
go install github.com/spdx/spdx-sbom-generator/cmd/generator@$SPDX_GEN_VERSION
|
||||
@@ -236,7 +264,7 @@ jobs:
|
||||
echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Upload SBOM
|
||||
uses: softprops/action-gh-release@6da8fa9354ddfdc4aeace5fc48d7f679b5214090 # v2.4.1
|
||||
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2.5.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
@@ -244,12 +272,12 @@ jobs:
|
||||
/tmp/sbom.tar.gz
|
||||
|
||||
sbom-provenance:
|
||||
needs: [generate-sbom]
|
||||
needs: [generate-sbom, setup-variables]
|
||||
permissions:
|
||||
actions: read # for detecting the Github Actions environment
|
||||
id-token: write # Needed for provenance signing and ID
|
||||
contents: write # Needed for release uploads
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
# Must be referenced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
|
||||
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
|
||||
with:
|
||||
@@ -266,13 +294,13 @@ jobs:
|
||||
permissions:
|
||||
contents: write # Needed to push commit to update stable tag
|
||||
pull-requests: write # Needed to create PR for VERSION update.
|
||||
if: github.repository == 'argoproj/argo-cd'
|
||||
runs-on: ubuntu-22.04
|
||||
if: github.repository == 'argoproj/argo-cd' || needs.setup-variables.outputs.allow_fork_release == 'true'
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
TAG_STABLE: ${{ needs.setup-variables.outputs.is_latest_release }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -316,7 +344,7 @@ jobs:
|
||||
if: ${{ env.UPDATE_VERSION == 'true' }}
|
||||
|
||||
- name: Create PR to update VERSION on master branch
|
||||
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
|
||||
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8.1.0
|
||||
with:
|
||||
commit-message: Bump version in master
|
||||
title: 'chore: Bump version in master'
|
||||
|
||||
13  .github/workflows/renovate.yaml (vendored)
@@ -9,7 +9,7 @@ permissions:

jobs:
renovate:
runs-on: ubuntu-latest
runs-on: ubuntu-24.04
if: github.repository == 'argoproj/argo-cd'
steps:
- name: Get token

@@ -20,17 +20,10 @@ jobs:
private-key: ${{ secrets.RENOVATE_APP_PRIVATE_KEY }}

- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # 5.0.0

# Some codegen commands require Go to be setup
- name: Setup Golang
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
# renovate: datasource=golang-version packageName=golang
go-version: 1.25.3
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # 6.0.2

- name: Self-hosted Renovate
uses: renovatebot/github-action@ea850436a5fe75c0925d583c7a02c60a5865461d #43.0.20
uses: renovatebot/github-action@d65ef9e20512193cc070238b49c3873a361cd50c #46.1.1
with:
configurationFile: .github/configs/renovate-config.js
token: '${{ steps.get_token.outputs.token }}'
6  .github/workflows/scorecard.yaml (vendored)
@@ -17,7 +17,7 @@ permissions: read-all
jobs:
analysis:
name: Scorecards analysis
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write

@@ -30,7 +30,7 @@ jobs:

steps:
- name: "Checkout code"
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false

@@ -54,7 +54,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: SARIF file
path: results.sarif
33  .github/workflows/stale.yaml (vendored, Normal file)
@@ -0,0 +1,33 @@
name: "Label stale issues and PRs"
on:
schedule:
- cron: "0 0 * * *" #Runs midnight 12AM UTC

#Added Recommended permissions
permissions:
issues: write
pull-requests: write

jobs:
stale:
runs-on: ubuntu-24.04
steps:
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}

stale-issue-message: >
This issue has been marked as stale because it has had no activity for 90 days. Please comment if this is still relevant.

stale-pr-message: >
This pull request has been marked as stale because it has had no activity for 90 days. Please comment if this is still relevant.

days-before-stale: 90
days-before-close: -1 # Auto-close diabled

exempt-issue-labels: >
bug, security, breaking/high, breaking/medium, breaking/low

# General configuration
operations-per-run: 200
remove-stale-when-updated: true #Remove stale label when issue/pr is updated
4  .github/workflows/update-snyk.yaml (vendored)
@@ -14,10 +14,10 @@ jobs:
pull-requests: write
if: github.repository == 'argoproj/argo-cd'
name: Update Snyk report in the docs directory
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Build reports
@@ -22,6 +22,7 @@ linters:
- govet
- importas
- misspell
- modernize
- noctx
- perfsprint
- revive

@@ -121,6 +122,13 @@ linters:
- pkg: github.com/argoproj/argo-cd/v3/util/io
alias: utilio

modernize:
disable:
# Suggest replacing omitempty with omitzero for struct fields.
- omitzero
# Simplify code by using go1.26's new(expr). - generates lots of false positives.
- newexpr

nolintlint:
require-specific: true
@@ -66,14 +66,14 @@ release:

```shell
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/install.yaml
kubectl apply -n argocd --server-side --force-conflicts -f https://raw.githubusercontent.com/{{ .Env.GORELEASER_CURRENT_REPOSITORY }}/{{.Tag}}/manifests/install.yaml
```

### HA:

```shell
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/{{.Tag}}/manifests/ha/install.yaml
kubectl apply -n argocd --server-side --force-conflicts -f https://raw.githubusercontent.com/{{ .Env.GORELEASER_CURRENT_REPOSITORY }}/{{.Tag}}/manifests/ha/install.yaml
```

## Release Signatures and Provenance

@@ -87,7 +87,7 @@ release:

If upgrading from a different minor version, be sure to read the [upgrading](https://argo-cd.readthedocs.io/en/stable/operator-manual/upgrading/overview/) documentation.
footer: |
**Full Changelog**: https://github.com/argoproj/argo-cd/compare/{{ .PreviousTag }}...{{ .Tag }}
**Full Changelog**: https://github.com/{{ .Env.GORELEASER_CURRENT_REPOSITORY }}/compare/{{ .PreviousTag }}...{{ .Tag }}

<a href="https://argoproj.github.io/cd/"><img src="https://raw.githubusercontent.com/argoproj/argo-site/master/content/pages/cd/gitops-cd.png" width="25%" ></a>
@@ -1,5 +1,6 @@
dir: '{{.InterfaceDir}}/mocks'
filename: '{{.InterfaceName}}.go'
include-auto-generated: true # Needed since mockery 3.6.1
packages:
github.com/argoproj/argo-cd/v3/applicationset/generators:
interfaces:

@@ -31,6 +32,9 @@ packages:
github.com/argoproj/argo-cd/v3/pkg/apiclient/cluster:
interfaces:
ClusterServiceServer: {}
github.com/argoproj/argo-cd/v3/pkg/apiclient/project:
interfaces:
ProjectServiceClient: {}
github.com/argoproj/argo-cd/v3/pkg/apiclient/session:
interfaces:
SessionServiceClient: {}

@@ -75,10 +79,10 @@ packages:
github.com/argoproj/argo-cd/v3/util/workloadidentity:
interfaces:
TokenProvider: {}
github.com/argoproj/gitops-engine/pkg/cache:
github.com/argoproj/argo-cd/gitops-engine/pkg/cache:
interfaces:
ClusterCache: {}
github.com/argoproj/gitops-engine/pkg/diff:
github.com/argoproj/argo-cd/gitops-engine/pkg/diff:
interfaces:
ServerSideDryRunner: {}
github.com/microsoft/azure-devops-go-api/azuredevops/v7/git:
25  Dockerfile
@@ -1,10 +1,10 @@
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:25.04@sha256:27771fb7b40a58237c98e8d3e6b9ecdd9289cec69a857fccfb85ff36294dac20
|
||||
ARG BASE_IMAGE=docker.io/library/ubuntu:25.10@sha256:4a9232cc47bf99defcc8860ef6222c99773330367fcecbf21ba2edb0b810a31e
|
||||
####################################################################################################
|
||||
# Builder image
|
||||
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
|
||||
# Also used as the image in CI jobs so needs all dependencies
|
||||
####################################################################################################
|
||||
FROM docker.io/library/golang:1.25.3@sha256:6bac879c5b77e0fc9c556a5ed8920e89dab1709bd510a854903509c828f67f96 AS builder
|
||||
FROM docker.io/library/golang:1.26.0@sha256:c83e68f3ebb6943a2904fa66348867d108119890a2c6a2e6f07b38d0eb6c25c5 AS builder
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
@@ -16,7 +16,6 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
unzip \
|
||||
fcgiwrap \
|
||||
git \
|
||||
git-lfs \
|
||||
make \
|
||||
wget \
|
||||
gcc \
|
||||
@@ -29,7 +28,8 @@ COPY hack/install.sh hack/tool-versions.sh ./
|
||||
COPY hack/installers installers
|
||||
|
||||
RUN ./install.sh helm && \
|
||||
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
|
||||
INSTALL_PATH=/usr/local/bin ./install.sh kustomize && \
|
||||
./install.sh git-lfs
|
||||
|
||||
####################################################################################################
|
||||
# Argo CD Base - used as the base for both the release and dev argocd images
|
||||
@@ -50,10 +50,10 @@ RUN groupadd -g $ARGOCD_USER_ID argocd && \
|
||||
chmod g=u /home/argocd && \
|
||||
apt-get update && \
|
||||
apt-get dist-upgrade -y && \
|
||||
apt-get install -y \
|
||||
git git-lfs tini gpg tzdata connect-proxy && \
|
||||
apt-get install --no-install-recommends -y \
|
||||
git tini ca-certificates gpg gpg-agent tzdata connect-proxy openssh-client && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/doc/*
|
||||
|
||||
COPY hack/gpg-wrapper.sh \
|
||||
hack/git-verify-wrapper.sh \
|
||||
@@ -61,6 +61,7 @@ COPY hack/gpg-wrapper.sh \
|
||||
/usr/local/bin/
|
||||
COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm
|
||||
COPY --from=builder /usr/local/bin/kustomize /usr/local/bin/kustomize
|
||||
COPY --from=builder /usr/local/bin/git-lfs /usr/local/bin/git-lfs
|
||||
|
||||
# keep uid_entrypoint.sh for backward compatibility
|
||||
RUN ln -s /usr/local/bin/entrypoint.sh /usr/local/bin/uid_entrypoint.sh
|
||||
@@ -79,13 +80,19 @@ RUN mkdir -p tls && \
|
||||
|
||||
ENV USER=argocd
|
||||
|
||||
# Disable gRPC service config lookups via DNS TXT records to prevent excessive
|
||||
# DNS queries for _grpc_config.<hostname> which can cause timeouts in dual-stack
|
||||
# environments. This can be overridden via argocd-cmd-params-cm ConfigMap.
|
||||
# See https://github.com/argoproj/argo-cd/issues/24991
|
||||
ENV GRPC_ENABLE_TXT_SERVICE_CONFIG=false
|
||||
|
||||
USER $ARGOCD_USER_ID
|
||||
WORKDIR /home/argocd
|
||||
|
||||
####################################################################################################
|
||||
# Argo CD UI stage
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.0.0@sha256:e643c0b70dca9704dff42e12b17f5b719dbe4f95e6392fc2dfa0c5f02ea8044d AS argocd-ui
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/node:23.0.0@sha256:9d09fa506f5b8465c5221cbd6f980e29ae0ce9a3119e2b9bc0842e6a3f37bb59 AS argocd-ui
|
||||
|
||||
WORKDIR /src
|
||||
COPY ["ui/package.json", "ui/yarn.lock", "./"]
|
||||
@@ -103,7 +110,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
|
||||
####################################################################################################
|
||||
# Argo CD Build stage which performs the actual build of Argo CD binaries
|
||||
####################################################################################################
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.25.3@sha256:6bac879c5b77e0fc9c556a5ed8920e89dab1709bd510a854903509c828f67f96 AS argocd-build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.26.0@sha256:c83e68f3ebb6943a2904fa66348867d108119890a2c6a2e6f07b38d0eb6c25c5 AS argocd-build
|
||||
|
||||
WORKDIR /go/src/github.com/argoproj/argo-cd
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
FROM docker.io/library/golang:1.25.3@sha256:6bac879c5b77e0fc9c556a5ed8920e89dab1709bd510a854903509c828f67f96
FROM docker.io/library/golang:1.26.0@sha256:c83e68f3ebb6943a2904fa66348867d108119890a2c6a2e6f07b38d0eb6c25c5

ENV DEBIAN_FRONTEND=noninteractive

@@ -11,7 +11,6 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
unzip \
fcgiwrap \
git \
git-lfs \
make \
wget \
gcc \

@@ -28,7 +27,8 @@ COPY hack/install.sh hack/tool-versions.sh ./
COPY hack/installers installers

RUN ./install.sh helm && \
INSTALL_PATH=/usr/local/bin ./install.sh kustomize
INSTALL_PATH=/usr/local/bin ./install.sh kustomize && \
./install.sh git-lfs

COPY hack/gpg-wrapper.sh \
hack/git-verify-wrapper.sh \
43  MAINTAINERS.md (Normal file)
@@ -0,0 +1,43 @@
|
||||
# Argo CD Maintainers
|
||||
|
||||
This document lists the maintainers of the Argo CD project.
|
||||
|
||||
## Maintainers
|
||||
|
||||
| Maintainer | GitHub ID | Project Roles | Affiliation |
|
||||
|---------------------------|---------------------------------------------------------|----------------------|-------------------------------------------------|
|
||||
| Zach Aller | [zachaller](https://github.com/zachaller) | Reviewer | [Intuit](https://www.github.com/intuit/) |
|
||||
| Leonardo Luz Almeida | [leoluz](https://github.com/leoluz) | Approver | [Intuit](https://www.github.com/intuit/) |
|
||||
| Chetan Banavikalmutt | [chetan-rns](https://github.com/chetan-rns) | Reviewer | [Red Hat](https://redhat.com/) |
|
||||
| Keith Chong | [keithchong](https://github.com/keithchong) | Approver | [Red Hat](https://redhat.com/) |
|
||||
| Alex Collins | [alexec](https://github.com/alexec) | Approver | [Intuit](https://www.github.com/intuit/) |
|
||||
| Michael Crenshaw | [crenshaw-dev](https://github.com/crenshaw-dev) | Lead | [Intuit](https://www.github.com/intuit/) |
|
||||
| Soumya Ghosh Dastidar | [gdsoumya](https://github.com/gdsoumya) | Approver | [Akuity](https://akuity.io/) |
|
||||
| Eugene Doudine | [dudinea](https://github.com/dudinea) | Reviewer | [Octopus Deploy](https://octopus.com/) |
|
||||
| Jann Fischer | [jannfis](https://github.com/jannfis) | Approver | [Red Hat](https://redhat.com/) |
|
||||
| Dan Garfield | [todaywasawesome](https://github.com/todaywasawesome) | Approver(docs) | [Octopus Deploy](https://octopus.com/) |
|
||||
| Alexandre Gaudreault | [agaudreault](https://github.com/agaudreault) | Approver | [Intuit](https://www.github.com/intuit/) |
|
||||
| Christian Hernandez | [christianh814](https://github.com/christianh814) | Reviewer(docs) | [Akuity](https://akuity.io/) |
|
||||
| Peter Jiang | [pjiang-dev](https://github.com/pjiang-dev) | Approver(docs) | [Intuit](https://www.intuit.com/) |
|
||||
| Andrii Korotkov | [andrii-korotkov](https://github.com/andrii-korotkov) | Reviewer | [Verkada](https://www.verkada.com/) |
|
||||
| Pasha Kostohrys | [pasha-codefresh](https://github.com/pasha-codefresh) | Approver | [Codefresh](https://www.github.com/codefresh/) |
|
||||
| Nitish Kumar | [nitishfy](https://github.com/nitishfy) | Approver(cli,docs) | [Akuity](https://akuity.io/) |
|
||||
| Justin Marquis | [34fathombelow](https://github.com/34fathombelow) | Approver(docs/ci) | [Akuity](https://akuity.io/) |
|
||||
| Alexander Matyushentsev | [alexmt](https://github.com/alexmt) | Lead | [Akuity](https://akuity.io/) |
|
||||
| Nicholas Morey | [morey-tech](https://github.com/morey-tech) | Reviewer(docs) | [Akuity](https://akuity.io/) |
|
||||
| Papapetrou Patroklos | [ppapapetrou76](https://github.com/ppapapetrou76) | Approver(docs,cli) | [Octopus Deploy](https://octopus.com/) |
|
||||
| Blake Pettersson | [blakepettersson](https://github.com/blakepettersson) | Approver | [Akuity](https://akuity.io/) |
|
||||
| Ishita Sequeira | [ishitasequeira](https://github.com/ishitasequeira) | Approver | [Red Hat](https://redhat.com/) |
|
||||
| Ashutosh Singh | [ashutosh16](https://github.com/ashutosh16) | Approver(docs) | [Intuit](https://www.github.com/intuit/) |
|
||||
| Linghao Su | [linghaoSu](https://github.com/linghaoSu) | Reviewer | [DaoCloud](https://daocloud.io) |
|
||||
| Jesse Suen | [jessesuen](https://github.com/jessesuen) | Approver | [Akuity](https://akuity.io/) |
|
||||
| Yuan Tang | [terrytangyuan](https://github.com/terrytangyuan) | Reviewer | [Red Hat](https://redhat.com/) |
|
||||
| William Tam | [wtam2018](https://github.com/wtam2018) | Reviewer | [Red Hat](https://redhat.com/) |
|
||||
| Ryan Umstead | [rumstead](https://github.com/rumstead) | Approver | [Black Rock](https://www.github.com/blackrock/) |
|
||||
| Regina Voloshin | [reggie-k](https://github.com/reggie-k) | Approver | [Octopus Deploy](https://octopus.com/) |
|
||||
| Hong Wang | [wanghong230](https://github.com/wanghong230) | Reviewer | [Akuity](https://akuity.io/) |
|
||||
| Jonathan West | [jgwest](https://github.com/jgwest) | Approver | [Red Hat](https://redhat.com/) |
|
||||
| Jaewoo Choi | [choejwoo](https://github.com/choejwoo) | Reviewer | [Hyundai-Autoever](https://www.hyundai-autoever.com/eng/) |
|
||||
| Alexy Mantha | [alexymantha](https://github.com/alexymantha) | Reviewer | GoTo |
|
||||
| Kanika Rana | [ranakan19](https://github.com/ranakan19) | Reviewer | [Red Hat](https://redhat.com/) |
|
||||
| Jonathan Winters | [jwinters01](https://github.com/jwinters01) | Reviewer | [Intuit](https://www.github.com/intuit/) |
|
||||
97  Makefile
@@ -56,8 +56,8 @@ endif
|
||||
|
||||
ARGOCD_PROCFILE?=Procfile
|
||||
|
||||
# pointing to python 3.7 to match https://github.com/argoproj/argo-cd/blob/master/.readthedocs.yml
|
||||
MKDOCS_DOCKER_IMAGE?=python:3.7-alpine
|
||||
# pointing to python 3.12 to match https://github.com/argoproj/argo-cd/blob/master/.readthedocs.yaml
|
||||
MKDOCS_DOCKER_IMAGE?=python:3.12-alpine
|
||||
MKDOCS_RUN_ARGS?=
|
||||
|
||||
# Configuration for building argocd-test-tools image
|
||||
@@ -76,15 +76,15 @@ ARGOCD_E2E_REDIS_PORT?=6379
|
||||
ARGOCD_E2E_DEX_PORT?=5556
|
||||
ARGOCD_E2E_YARN_HOST?=localhost
|
||||
ARGOCD_E2E_DISABLE_AUTH?=
|
||||
ARGOCD_E2E_DIR?=/tmp/argo-e2e
|
||||
|
||||
ARGOCD_E2E_TEST_TIMEOUT?=90m
|
||||
ARGOCD_E2E_RERUN_FAILS?=5
|
||||
|
||||
ARGOCD_IN_CI?=false
|
||||
ARGOCD_TEST_E2E?=true
|
||||
ARGOCD_BIN_MODE?=true
|
||||
|
||||
ARGOCD_LINT_GOGC?=20
|
||||
|
||||
# Depending on where we are (legacy or non-legacy pwd), we need to use
|
||||
# different Docker volume mounts for our source tree
|
||||
LEGACY_PATH=$(GOPATH)/src/github.com/argoproj/argo-cd
|
||||
@@ -144,7 +144,6 @@ define run-in-test-client
|
||||
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
|
||||
-e GITHUB_TOKEN \
|
||||
-e GOCACHE=/tmp/go-build-cache \
|
||||
-e ARGOCD_LINT_GOGC=$(ARGOCD_LINT_GOGC) \
|
||||
-v ${DOCKER_SRC_MOUNT} \
|
||||
-v ${GOPATH}/pkg/mod:/go/pkg/mod${VOLUME_MOUNT} \
|
||||
-v ${GOCACHE}:/tmp/go-build-cache${VOLUME_MOUNT} \
|
||||
@@ -198,19 +197,40 @@ endif
|
||||
|
||||
ifneq (${GIT_TAG},)
|
||||
IMAGE_TAG=${GIT_TAG}
|
||||
LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
|
||||
override LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG}
|
||||
else
|
||||
IMAGE_TAG?=latest
|
||||
endif
|
||||
|
||||
# defaults for building images and manifests
|
||||
ifeq (${DOCKER_PUSH},true)
|
||||
ifndef IMAGE_NAMESPACE
|
||||
$(error IMAGE_NAMESPACE must be set to push images (e.g. IMAGE_NAMESPACE=argoproj))
|
||||
endif
|
||||
endif
|
||||
|
||||
# Consruct prefix for docker image
|
||||
# Note: keeping same logic as in hacks/update_manifests.sh
|
||||
ifdef IMAGE_REGISTRY
|
||||
ifdef IMAGE_NAMESPACE
|
||||
IMAGE_PREFIX=${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/
|
||||
else
|
||||
$(error IMAGE_NAMESPACE must be set when IMAGE_REGISTRY is set (e.g. IMAGE_NAMESPACE=argoproj))
|
||||
endif
|
||||
else
|
||||
ifdef IMAGE_NAMESPACE
|
||||
# for backwards compatibility with the old way like IMAGE_NAMESPACE='quay.io/argoproj'
|
||||
IMAGE_PREFIX=${IMAGE_NAMESPACE}/
|
||||
else
|
||||
# Neither namespace nor registry given - apply the default values
|
||||
IMAGE_REGISTRY="quay.io"
|
||||
IMAGE_NAMESPACE="argoproj"
|
||||
IMAGE_PREFIX=${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/
|
||||
endif
|
||||
endif
|
||||
|
||||
ifndef IMAGE_REPOSITORY
|
||||
IMAGE_REPOSITORY=argocd
|
||||
endif
|
||||
|
||||
.PHONY: all
|
||||
@@ -308,12 +328,11 @@ endif
|
||||
.PHONY: manifests-local
|
||||
manifests-local:
|
||||
./hack/update-manifests.sh
|
||||
|
||||
.PHONY: manifests
|
||||
manifests: test-tools-image
|
||||
$(call run-in-test-client,make manifests-local IMAGE_NAMESPACE='${IMAGE_NAMESPACE}' IMAGE_TAG='${IMAGE_TAG}')
|
||||
|
||||
$(call run-in-test-client,make manifests-local IMAGE_REGISTRY='${IMAGE_REGISTRY}' IMAGE_NAMESPACE='${IMAGE_NAMESPACE}' IMAGE_REPOSITORY='${IMAGE_REPOSITORY}' IMAGE_TAG='${IMAGE_TAG}')
|
||||
# consolidated binary for cli, util, server, repo-server, controller
|
||||
|
||||
.PHONY: argocd-all
|
||||
argocd-all: clean-debug
|
||||
CGO_ENABLED=${CGO_FLAG} GOOS=${GOOS} GOARCH=${GOARCH} GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/${BIN_NAME} ./cmd
|
||||
@@ -334,7 +353,7 @@ controller:
|
||||
build-ui:
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build -t argocd-ui --platform=$(TARGET_ARCH) --target argocd-ui .
|
||||
find ./ui/dist -type f -not -name gitkeep -delete
|
||||
$(DOCKER) run -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
$(DOCKER) run -u $(CONTAINER_UID):$(CONTAINER_GID) -v ${CURRENT_DIR}/ui/dist/app:/tmp/app --rm -t argocd-ui sh -c 'cp -r ./dist/app/* /tmp/app/'
|
||||
|
||||
.PHONY: image
|
||||
ifeq ($(DEV_IMAGE), true)
|
||||
@@ -344,23 +363,23 @@ ifeq ($(DEV_IMAGE), true)
|
||||
IMAGE_TAG="dev-$(shell git describe --always --dirty)"
|
||||
image: build-ui
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t argocd-base --target argocd-base .
|
||||
CGO_ENABLED=${CGO_FLAG} GOOS=linux GOARCH=amd64 GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
GOOS=linux GOARCH=$(TARGET_ARCH:linux/%=%) GODEBUG="tarinsecurepath=0,zipinsecurepath=0" go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd ./cmd
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-application-controller
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-repo-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-cmp-server
|
||||
ln -sfn ${DIST_DIR}/argocd ${DIST_DIR}/argocd-dex
|
||||
cp Dockerfile.dev dist
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build --platform=$(TARGET_ARCH) -t $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) -f dist/Dockerfile.dev dist
|
||||
else
|
||||
image:
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
|
||||
DOCKER_BUILDKIT=1 $(DOCKER) build -t $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) --platform=$(TARGET_ARCH) .
|
||||
endif
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then $(DOCKER) push $(IMAGE_PREFIX)argocd:$(IMAGE_TAG) ; fi
|
||||
@if [ "$(DOCKER_PUSH)" = "true" ] ; then $(DOCKER) push $(IMAGE_PREFIX)$(IMAGE_REPOSITORY):$(IMAGE_TAG) ; fi
|
||||
|
||||
.PHONY: armimage
|
||||
armimage:
|
||||
$(DOCKER) build -t $(IMAGE_PREFIX)argocd:$(IMAGE_TAG)-arm .
|
||||
$(DOCKER) build -t $(IMAGE_PREFIX)(IMAGE_REPOSITORY):$(IMAGE_TAG)-arm .
|
||||
|
||||
.PHONY: builder-image
|
||||
builder-image:
|
||||
@@ -392,9 +411,7 @@ lint: test-tools-image
|
||||
.PHONY: lint-local
|
||||
lint-local:
|
||||
golangci-lint --version
|
||||
# NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC
|
||||
# See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
|
||||
GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose
|
||||
golangci-lint run --fix --verbose
|
||||
|
||||
.PHONY: lint-ui
|
||||
lint-ui: test-tools-image
|
||||
@@ -426,12 +443,24 @@ test: test-tools-image
|
||||
|
||||
# Run all unit tests (local version)
|
||||
.PHONY: test-local
|
||||
test-local:
|
||||
test-local: test-gitops-engine
|
||||
# run if TEST_MODULE is empty or does not point to gitops-engine tests
|
||||
ifneq ($(if $(TEST_MODULE),,ALL)$(filter-out github.com/argoproj/argo-cd/gitops-engine% ./gitops-engine%,$(TEST_MODULE)),)
|
||||
if test "$(TEST_MODULE)" = ""; then \
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES=`go list ./... | grep -v 'test/e2e'` ./hack/test.sh -args -test.gocoverdir="$(PWD)/test-results"; \
|
||||
else \
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=0 PACKAGES="$(TEST_MODULE)" ./hack/test.sh -args -test.gocoverdir="$(PWD)/test-results" "$(TEST_MODULE)"; \
|
||||
fi
|
||||
endif
|
||||
|
||||
# Run gitops-engine unit tests
|
||||
.PHONY: test-gitops-engine
|
||||
test-gitops-engine:
|
||||
# run if TEST_MODULE is empty or points to gitops-engine tests
|
||||
ifneq ($(if $(TEST_MODULE),,ALL)$(filter github.com/argoproj/argo-cd/gitops-engine% ./gitops-engine%,$(TEST_MODULE)),)
|
||||
mkdir -p $(PWD)/test-results
|
||||
cd gitops-engine && go test -race -cover ./... -args -test.gocoverdir="$(PWD)/test-results"
|
||||
endif
|
||||
|
||||
.PHONY: test-race
|
||||
test-race: test-tools-image
|
||||
@@ -458,7 +487,7 @@ test-e2e:
|
||||
test-e2e-local: cli-local
|
||||
# NO_PROXY ensures all tests don't go out through a proxy if one is configured on the test system
|
||||
export GO111MODULE=off
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=5 PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
|
||||
DIST_DIR=${DIST_DIR} RERUN_FAILS=$(ARGOCD_E2E_RERUN_FAILS) PACKAGES="./test/e2e" ARGOCD_E2E_RECORD=${ARGOCD_E2E_RECORD} ARGOCD_CONFIG_DIR=$(HOME)/.config/argocd-e2e ARGOCD_GPG_ENABLED=true NO_PROXY=* ./hack/test.sh -timeout $(ARGOCD_E2E_TEST_TIMEOUT) -v -args -test.gocoverdir="$(PWD)/test-results"
|
||||
|
||||
# Spawns a shell in the test server container for debugging purposes
|
||||
debug-test-server: test-tools-image
|
||||
@@ -482,13 +511,13 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
kubectl create ns argocd-e2e-external || true
|
||||
kubectl create ns argocd-e2e-external-2 || true
|
||||
kubectl config set-context --current --namespace=argocd-e2e
|
||||
kustomize build test/manifests/base | kubectl apply -f -
|
||||
kustomize build test/manifests/base | kubectl apply --server-side --force-conflicts -f -
|
||||
kubectl apply -f https://raw.githubusercontent.com/open-cluster-management/api/a6845f2ebcb186ec26b832f60c988537a58f3859/cluster/v1alpha1/0000_04_clusters.open-cluster-management.io_placementdecisions.crd.yaml
|
||||
# Create GPG keys and source directories
|
||||
if test -d /tmp/argo-e2e/app/config/gpg; then rm -rf /tmp/argo-e2e/app/config/gpg/*; fi
|
||||
mkdir -p /tmp/argo-e2e/app/config/gpg/keys && chmod 0700 /tmp/argo-e2e/app/config/gpg/keys
|
||||
mkdir -p /tmp/argo-e2e/app/config/gpg/source && chmod 0700 /tmp/argo-e2e/app/config/gpg/source
|
||||
mkdir -p /tmp/argo-e2e/app/config/plugin && chmod 0700 /tmp/argo-e2e/app/config/plugin
|
||||
if test -d $(ARGOCD_E2E_DIR)/app/config/gpg; then rm -rf $(ARGOCD_E2E_DIR)/app/config/gpg/*; fi
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/keys && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/keys
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/gpg/source && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/gpg/source
|
||||
mkdir -p $(ARGOCD_E2E_DIR)/app/config/plugin && chmod 0700 $(ARGOCD_E2E_DIR)/app/config/plugin
|
||||
# create folders to hold go coverage results for each component
|
||||
mkdir -p /tmp/coverage/app-controller
|
||||
mkdir -p /tmp/coverage/api-server
|
||||
@@ -497,13 +526,15 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
|
||||
mkdir -p /tmp/coverage/notification
|
||||
mkdir -p /tmp/coverage/commit-server
|
||||
# set paths for locally managed ssh known hosts and tls certs data
|
||||
ARGOCD_SSH_DATA_PATH=/tmp/argo-e2e/app/config/ssh \
|
||||
ARGOCD_TLS_DATA_PATH=/tmp/argo-e2e/app/config/tls \
|
||||
ARGOCD_GPG_DATA_PATH=/tmp/argo-e2e/app/config/gpg/source \
|
||||
ARGOCD_GNUPGHOME=/tmp/argo-e2e/app/config/gpg/keys \
|
||||
ARGOCD_E2E_DIR=$(ARGOCD_E2E_DIR) \
|
||||
ARGOCD_SSH_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/ssh \
|
||||
ARGOCD_TLS_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/tls \
|
||||
ARGOCD_GPG_DATA_PATH=$(ARGOCD_E2E_DIR)/app/config/gpg/source \
|
||||
ARGOCD_GNUPGHOME=$(ARGOCD_E2E_DIR)/app/config/gpg/keys \
|
||||
ARGOCD_GPG_ENABLED=$(ARGOCD_GPG_ENABLED) \
|
||||
ARGOCD_PLUGINCONFIGFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_PLUGINSOCKFILEPATH=/tmp/argo-e2e/app/config/plugin \
|
||||
ARGOCD_PLUGINCONFIGFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
|
||||
ARGOCD_PLUGINSOCKFILEPATH=$(ARGOCD_E2E_DIR)/app/config/plugin \
|
||||
ARGOCD_GIT_CONFIG=$(PWD)/test/e2e/fixture/gitconfig \
|
||||
ARGOCD_E2E_DISABLE_AUTH=false \
|
||||
ARGOCD_ZJWT_FEATURE_FLAG=always \
|
||||
ARGOCD_IN_CI=$(ARGOCD_IN_CI) \
|
||||
@@ -580,7 +611,7 @@ build-docs-local:
|
||||
|
||||
.PHONY: build-docs
|
||||
build-docs:
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install mkdocs; pip install $$(mkdocs get-deps); mkdocs build'
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build'
|
||||
|
||||
.PHONY: serve-docs-local
|
||||
serve-docs-local:
|
||||
@@ -588,7 +619,7 @@ serve-docs-local:
|
||||
|
||||
.PHONY: serve-docs
|
||||
serve-docs:
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install mkdocs; pip install $$(mkdocs get-deps); mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
|
||||
$(DOCKER) run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs -w /docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs serve -a $$(ip route get 1 | awk '\''{print $$7}'\''):8000'
|
||||
|
||||
# Verify that kubectl can connect to your K8s cluster from Docker
|
||||
.PHONY: verify-kube-connect
|
||||
|
||||
4  Procfile
@@ -2,7 +2,7 @@ controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run
|
||||
api-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/api-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-server $COMMAND --loglevel debug --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --disable-auth=${ARGOCD_E2E_DISABLE_AUTH:-'true'} --insecure --dex-server http://localhost:${ARGOCD_E2E_DEX_PORT:-5556} --repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081} --port ${ARGOCD_E2E_APISERVER_PORT:-8080} --otlp-address=${ARGOCD_OTLP_ADDRESS} --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --hydrator-enabled=${ARGOCD_HYDRATOR_ENABLED:='false'}"
|
||||
dex: sh -c "ARGOCD_BINARY_NAME=argocd-dex go run github.com/argoproj/argo-cd/v3/cmd gendexcfg -o `pwd`/dist/dex.yaml && (test -f dist/dex.yaml || { echo 'Failed to generate dex configuration'; exit 1; }) && docker run --rm -p ${ARGOCD_E2E_DEX_PORT:-5556}:${ARGOCD_E2E_DEX_PORT:-5556} -v `pwd`/dist/dex.yaml:/dex.yaml ghcr.io/dexidp/dex:$(grep "image: ghcr.io/dexidp/dex" manifests/base/dex/argocd-dex-server-deployment.yaml | cut -d':' -f3) dex serve /dex.yaml"
|
||||
redis: hack/start-redis-with-password.sh
|
||||
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
repo-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "export PATH=./dist:\$PATH && [ -n \"\$ARGOCD_GIT_CONFIG\" ] && export GIT_CONFIG_GLOBAL=\$ARGOCD_GIT_CONFIG && export GIT_CONFIG_NOSYSTEM=1; GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/repo-server} FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_GNUPGHOME=${ARGOCD_GNUPGHOME:-/tmp/argocd-local/gpg/keys} ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} ARGOCD_GPG_DATA_PATH=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source} ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-repo-server ARGOCD_GPG_ENABLED=${ARGOCD_GPG_ENABLED:-false} $COMMAND --loglevel debug --port ${ARGOCD_E2E_REPOSERVER_PORT:-8081} --redis localhost:${ARGOCD_E2E_REDIS_PORT:-6379} --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
cmp-server: [ "$ARGOCD_E2E_TEST" = 'true' ] && exit 0 || [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_BINARY_NAME=argocd-cmp-server ARGOCD_PLUGINSOCKFILEPATH=${ARGOCD_PLUGINSOCKFILEPATH:-./test/cmp} $COMMAND --config-dir-path ./test/cmp --loglevel debug --otlp-address=${ARGOCD_OTLP_ADDRESS}"
|
||||
commit-server: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/commit-server} FORCE_LOG_COLORS=1 ARGOCD_BINARY_NAME=argocd-commit-server $COMMAND --loglevel debug --port ${ARGOCD_E2E_COMMITSERVER_PORT:-8086}"
|
||||
ui: sh -c 'cd ui && ${ARGOCD_E2E_YARN_CMD:-yarn} start'
|
||||
@@ -11,4 +11,4 @@ helm-registry: test/fixture/testrepos/start-helm-registry.sh
|
||||
oci-registry: test/fixture/testrepos/start-authenticated-helm-registry.sh
|
||||
dev-mounter: [ "$ARGOCD_E2E_TEST" != "true" ] && go run hack/dev-mounter/main.go --configmap argocd-ssh-known-hosts-cm=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} --configmap argocd-tls-certs-cm=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} --configmap argocd-gpg-keys-cm=${ARGOCD_GPG_DATA_PATH:-/tmp/argocd-local/gpg/source}
|
||||
applicationset-controller: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/applicationset-controller} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_SSH_DATA_PATH=${ARGOCD_SSH_DATA_PATH:-/tmp/argocd-local/ssh} ARGOCD_BINARY_NAME=argocd-applicationset-controller $COMMAND --loglevel debug --metrics-addr localhost:12345 --probe-addr localhost:12346 --argocd-repo-server localhost:${ARGOCD_E2E_REPOSERVER_PORT:-8081}"
|
||||
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
|
||||
notification: [ "$BIN_MODE" = 'true' ] && COMMAND=./dist/argocd || COMMAND='go run ./cmd/main.go' && sh -c "GOCOVERDIR=${ARGOCD_COVERAGE_DIR:-/tmp/coverage/notification} FORCE_LOG_COLORS=4 ARGOCD_FAKE_IN_CLUSTER=true ARGOCD_TLS_DATA_PATH=${ARGOCD_TLS_DATA_PATH:-/tmp/argocd-local/tls} ARGOCD_BINARY_NAME=argocd-notifications $COMMAND --loglevel debug --application-namespaces=${ARGOCD_APPLICATION_NAMESPACES:-''} --self-service-notification-enabled=${ARGOCD_NOTIFICATION_CONTROLLER_SELF_SERVICE_NOTIFICATION_ENABLED:-'false'}"
|
||||
|
||||
@@ -13,6 +13,7 @@
[](https://twitter.com/argoproj)
[](https://argoproj.github.io/community/join-slack)
[](https://www.linkedin.com/company/argoproj/)
[](https://bsky.app/profile/argoproj.bsky.social)

# Argo CD - Declarative Continuous Delivery for Kubernetes
@@ -3,9 +3,9 @@ header:
expiration-date: '2024-10-31T00:00:00.000Z' # One year from initial release.
last-updated: '2023-10-27'
last-reviewed: '2023-10-27'
commit-hash: 06ef059f9fc7cf9da2dfaef2a505ee1e3c693485
commit-hash: 814db444c36503851dc3d45cf9c44394821ca1a4
project-url: https://github.com/argoproj/argo-cd
project-release: v3.3.0
project-release: v3.4.0
changelog: https://github.com/argoproj/argo-cd/releases
license: https://github.com/argoproj/argo-cd/blob/master/LICENSE
project-lifecycle:
25  Tiltfile
@@ -60,7 +60,7 @@ k8s_yaml(kustomize('manifests/dev-tilt'))
|
||||
|
||||
# build dev image
|
||||
docker_build_with_restart(
|
||||
'argocd',
|
||||
'quay.io/argoproj/argocd:latest',
|
||||
context='.',
|
||||
dockerfile='Dockerfile.tilt',
|
||||
entrypoint=[
|
||||
@@ -123,6 +123,7 @@ k8s_resource(
|
||||
'9345:2345',
|
||||
'8083:8083'
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track crds
|
||||
@@ -148,6 +149,7 @@ k8s_resource(
|
||||
'9346:2345',
|
||||
'8084:8084'
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-redis resources and port forward
|
||||
@@ -162,6 +164,7 @@ k8s_resource(
|
||||
port_forwards=[
|
||||
'6379:6379',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-applicationset-controller resources
|
||||
@@ -180,6 +183,7 @@ k8s_resource(
|
||||
'8085:8080',
|
||||
'7000:7000'
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-application-controller resources
|
||||
@@ -197,6 +201,7 @@ k8s_resource(
|
||||
'9348:2345',
|
||||
'8086:8082',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-notifications-controller resources
|
||||
@@ -214,6 +219,7 @@ k8s_resource(
|
||||
'9349:2345',
|
||||
'8087:9001',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-dex-server resources
|
||||
@@ -225,6 +231,7 @@ k8s_resource(
|
||||
'argocd-dex-server:role',
|
||||
'argocd-dex-server:rolebinding',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# track argocd-commit-server resources
|
||||
@@ -239,6 +246,19 @@ k8s_resource(
|
||||
'8088:8087',
|
||||
'8089:8086',
|
||||
],
|
||||
resource_deps=['build']
|
||||
)
|
||||
|
||||
# ui dependencies
|
||||
local_resource(
|
||||
'node-modules',
|
||||
'yarn',
|
||||
dir='ui',
|
||||
deps = [
|
||||
'ui/package.json',
|
||||
'ui/yarn.lock',
|
||||
],
|
||||
allow_parallel=True,
|
||||
)
|
||||
|
||||
# docker for ui
|
||||
@@ -260,6 +280,7 @@ k8s_resource(
|
||||
port_forwards=[
|
||||
'4000:4000',
|
||||
],
|
||||
resource_deps=['node-modules'],
|
||||
)
|
||||
|
||||
# linting
|
||||
@@ -278,6 +299,7 @@ local_resource(
|
||||
'ui',
|
||||
],
|
||||
allow_parallel=True,
|
||||
resource_deps=['node-modules'],
|
||||
)
|
||||
|
||||
local_resource(
|
||||
@@ -287,5 +309,6 @@ local_resource(
|
||||
'go.mod',
|
||||
'go.sum',
|
||||
],
|
||||
allow_parallel=True,
|
||||
)
|
||||
|
||||
|
||||
13  USERS.md
@@ -31,6 +31,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
|
||||
1. [Ant Group](https://www.antgroup.com/)
|
||||
1. [AppDirect](https://www.appdirect.com)
|
||||
1. [Arcadia](https://www.arcadia.io)
|
||||
1. [Arctiq Inc.](https://www.arctiq.ca)
|
||||
1. [Artemis Health by Nomi Health](https://www.artemishealth.com/)
|
||||
1. [Arturia](https://www.arturia.com)
|
||||
@@ -86,6 +87,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Codefresh](https://www.codefresh.io/)
|
||||
1. [Codility](https://www.codility.com/)
|
||||
1. [Cognizant](https://www.cognizant.com/)
|
||||
1. [Collins Aerospace](https://www.collinsaerospace.com/)
|
||||
1. [Commonbond](https://commonbond.co/)
|
||||
1. [Compatio.AI](https://compatio.ai/)
|
||||
1. [Contlo](https://contlo.com/)
|
||||
@@ -99,6 +101,7 @@ Currently, the following organizations are **officially** using Argo CD:
|
||||
1. [Datarisk](https://www.datarisk.io/)
|
||||
1. [Daydream](https://daydream.ing)
|
||||
1. [Deloitte](https://www.deloitte.com/)
|
||||
1. [Dematic](https://www.dematic.com)
|
||||
1. [Deutsche Telekom AG](https://telekom.com)
|
||||
1. [Deutsche Bank AG](https://www.deutsche-bank.de/)
|
||||
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
|
||||
@@ -107,6 +110,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [DigitalOcean](https://www.digitalocean.com)
1. [Divar](https://divar.ir)
1. [Divistant](https://divistant.com)
1. [DocNetwork](https://docnetwork.org/)
1. [Dott](https://ridedott.com)
1. [Doubble](https://www.doubble.app)
1. [Doximity](https://www.doximity.com/)
@@ -121,6 +125,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [enigmo](https://enigmo.co.jp/)
1. [Envoy](https://envoy.com/)
1. [eSave](https://esave.es/)
1. [Expedia](https://www.expedia.com)
1. [Factorial](https://factorialhr.com/)
1. [Farfetch](https://www.farfetch.com)
1. [Faro](https://www.faro.com/)
@@ -173,6 +178,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [IFS](https://www.ifs.com)
1. [IITS-Consulting](https://iits-consulting.de)
1. [IllumiDesk](https://www.illumidesk.com)
1. [Imagine Learning](https://www.imaginelearning.com/)
1. [imaware](https://imaware.health)
1. [Indeed](https://indeed.com)
1. [Index Exchange](https://www.indexexchange.com/)
@@ -181,6 +187,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Instruqt](https://www.instruqt.com)
1. [Intel](https://www.intel.com)
1. [Intuit](https://www.intuit.com/)
1. [IQVIA](https://www.iqvia.com/)
1. [Jellysmack](https://www.jellysmack.com)
1. [Joblift](https://joblift.com/)
1. [JovianX](https://www.jovianx.com/)
@@ -202,6 +209,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Kurly](https://www.kurly.com/)
1. [Kvist](https://kvistsolutions.com)
1. [Kyriba](https://www.kyriba.com/)
1. [Lattice](https://lattice.com)
1. [LeFigaro](https://www.lefigaro.fr/)
1. [Lely](https://www.lely.com/)
1. [LexisNexis](https://www.lexisnexis.com/)
@@ -232,12 +240,14 @@ Currently, the following organizations are **officially** using Argo CD:
1. [mixi Group](https://mixi.co.jp/)
1. [Moengage](https://www.moengage.com/)
1. [Money Forward](https://corp.moneyforward.com/en/)
1. [MongoDB](https://www.mongodb.com/)
1. [MOO Print](https://www.moo.com/)
1. [Mozilla](https://www.mozilla.org)
1. [MTN Group](https://www.mtn.com/)
1. [Municipality of The Hague](https://www.denhaag.nl/)
1. [My Job Glasses](https://myjobglasses.com)
1. [Natura &Co](https://naturaeco.com/)
1. [Netease Cloud Music](https://music.163.com/)
1. [Nethopper](https://nethopper.io)
1. [New Relic](https://newrelic.com/)
1. [Nextbasket](https://nextbasket.com)
@@ -311,6 +321,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [RightRev](https://rightrev.com/)
1. [Rijkswaterstaat](https://www.rijkswaterstaat.nl/en)
1. Rise
1. [RISK IDENT](https://riskident.com/)
1. [Riskified](https://www.riskified.com/)
1. [Robotinfra](https://www.robotinfra.com)
1. [Rocket.Chat](https://rocket.chat)
@@ -320,6 +331,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Salad Technologies](https://salad.com/)
1. [Saloodo! GmbH](https://www.saloodo.com)
1. [Sap Labs](http://sap.com)
1. [SAP Signavio](https://www.signavio.com)
1. [Sauce Labs](https://saucelabs.com/)
1. [Schneider Electric](https://www.se.com)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
@@ -377,6 +389,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Ticketmaster](https://ticketmaster.com)
1. [Tiger Analytics](https://www.tigeranalytics.com/)
1. [Tigera](https://www.tigera.io/)
1. [Topicus.Education](https://topicus.nl/en/sectors/education)
1. [Toss](https://toss.im/en)
1. [Trendyol](https://www.trendyol.com/)
1. [tru.ID](https://tru.id)
@@ -20,6 +20,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -47,7 +48,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
"github.com/argoproj/argo-cd/gitops-engine/pkg/health"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/controllers/template"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/generators"
|
||||
@@ -57,6 +58,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
applog "github.com/argoproj/argo-cd/v3/util/app/log"
|
||||
"github.com/argoproj/argo-cd/v3/util/db"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
argoutil "github.com/argoproj/argo-cd/v3/util/argo"
|
||||
@@ -75,9 +77,15 @@ const (
AllAtOnceDeletionOrder = "AllAtOnce"
)

var defaultPreservedFinalizers = []string{
argov1alpha1.PreDeleteFinalizerName,
argov1alpha1.PostDeleteFinalizerName,
}

var defaultPreservedAnnotations = []string{
NotifiedAnnotationKey,
argov1alpha1.AnnotationKeyRefresh,
argov1alpha1.AnnotationKeyHydrate,
}

type deleteInOrder struct {
@@ -104,6 +112,7 @@ type ApplicationSetReconciler struct {
GlobalPreservedLabels []string
Metrics *metrics.ApplicationsetMetrics
MaxResourcesStatusCount int
ClusterInformer *settings.ClusterInformer
}

// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
@@ -176,6 +185,16 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, err
}

// ensure the finalizer exists if deletionOrder is set to Reverse
if r.EnableProgressiveSyncs && isProgressiveSyncDeletionOrderReversed(&applicationSetInfo) {
if !controllerutil.ContainsFinalizer(&applicationSetInfo, argov1alpha1.ResourcesFinalizerName) {
controllerutil.AddFinalizer(&applicationSetInfo, argov1alpha1.ResourcesFinalizerName)
if err := r.Update(ctx, &applicationSetInfo); err != nil {
return ctrl.Result{}, err
}
}
}

// Log a warning if there are unrecognized generators
_ = utils.CheckInvalidGenerators(&applicationSetInfo)
// desiredApplications is the main list of all expected Applications from all generators in this appset.
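The block above only touches the ApplicationSet when the finalizer is actually missing, which keeps repeated reconciles idempotent. A minimal standalone sketch of the same add-if-missing pattern with controller-runtime's `controllerutil` helpers; the `ensureFinalizer` name and the generic `client.Object` signature are illustrative, not part of the change above.

```go
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureFinalizer adds finalizer to obj only when it is not present yet and
// persists the change. Calling it again is a no-op, so it is safe inside a
// reconcile loop. (Hypothetical helper for illustration.)
func ensureFinalizer(ctx context.Context, c client.Client, obj client.Object, finalizer string) error {
	if controllerutil.ContainsFinalizer(obj, finalizer) {
		return nil
	}
	controllerutil.AddFinalizer(obj, finalizer)
	return c.Update(ctx, obj)
}
```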
@@ -653,8 +672,9 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
|
||||
Watches(
|
||||
&corev1.Secret{},
|
||||
&clusterSecretEventHandler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
ApplicationSetNamespaces: r.ApplicationSetNamespaces,
|
||||
}).
|
||||
Complete(r)
|
||||
}
|
||||
@@ -731,21 +751,19 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
}
}

// Preserve post-delete finalizers:
// https://github.com/argoproj/argo-cd/issues/17181
for _, finalizer := range found.Finalizers {
if strings.HasPrefix(finalizer, argov1alpha1.PostDeleteFinalizerName) {
if generatedApp.Finalizers == nil {
generatedApp.Finalizers = []string{}
// Preserve deleting finalizers and avoid diff conflicts
for _, finalizer := range defaultPreservedFinalizers {
for _, f := range found.Finalizers {
// For finalizers, use prefix matching in case it contains "/" stages
if strings.HasPrefix(f, finalizer) {
generatedApp.Finalizers = append(generatedApp.Finalizers, f)
}
generatedApp.Finalizers = append(generatedApp.Finalizers, finalizer)
}
}

found.Annotations = generatedApp.Annotations

found.Finalizers = generatedApp.Finalizers
found.Labels = generatedApp.Labels
found.Finalizers = generatedApp.Finalizers

return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
})
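The new loop keeps any finalizer on the live Application whose name starts with one of the preserved prefixes, so stage-qualified variants like `v1alpha1.PostDeleteFinalizerName + "/mystage"` in the tests below survive a template re-render. A self-contained sketch of that prefix-preservation idea on plain strings; `preserveByPrefix` and the shortened finalizer names are illustrative only.

```go
package main

import (
	"fmt"
	"strings"
)

// preserveByPrefix returns the finalizers from existing whose name starts with
// one of the preserved prefixes, so stage-qualified variants such as
// "post-delete/mystage" are kept. Illustrative only; the real controller works
// on *v1alpha1.Application objects.
func preserveByPrefix(existing, preservedPrefixes []string) []string {
	var kept []string
	for _, prefix := range preservedPrefixes {
		for _, f := range existing {
			if strings.HasPrefix(f, prefix) {
				kept = append(kept, f)
			}
		}
	}
	return kept
}

func main() {
	existing := []string{"non-argo-finalizer", "post-delete", "post-delete/mystage"}
	fmt.Println(preserveByPrefix(existing, []string{"pre-delete", "post-delete"}))
	// Output: [post-delete post-delete/mystage]
}
```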
@@ -810,7 +828,7 @@ func (r *ApplicationSetReconciler) getCurrentApplications(ctx context.Context, a
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
// The function must be called after all generators had been called and generated applications
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace)
clusterList, err := utils.ListClusters(r.ClusterInformer)
if err != nil {
return fmt.Errorf("error listing clusters: %w", err)
}
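`utils.ListClusters` now takes the shared cluster informer rather than a Kubernetes clientset, so each reconcile reads cluster secrets from an in-memory cache instead of issuing a live List against the API server. A rough sketch of that informer-backed pattern using plain client-go; `clusterSecretLister` is a hypothetical helper, and the assumption that `settings.ClusterInformer` wraps this kind of machinery is mine, not stated in the diff.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// clusterSecretLister starts a namespaced Secret informer and returns a
// function that serves secrets from the informer's cache, avoiding an API
// round-trip per call. Names are illustrative; the real type is
// settings.ClusterInformer.
func clusterSecretLister(clientset kubernetes.Interface, namespace string, stopCh <-chan struct{}) func() []*corev1.Secret {
	factory := informers.NewSharedInformerFactoryWithOptions(clientset, 0, informers.WithNamespace(namespace))
	secretInformer := factory.Core().V1().Secrets()
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, secretInformer.Informer().HasSynced)
	return func() []*corev1.Secret {
		secrets, _ := secretInformer.Lister().Secrets(namespace).List(labels.Everything())
		return secrets
	}
}
```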
@@ -876,16 +894,14 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
// Detect if the destination's server field does not match an existing cluster
matchingCluster := false
for _, cluster := range clusterList {
if destCluster.Server != cluster.Server {
continue
// A cluster matches if either the server matches OR the name matches
// This handles cases where:
// 1. The cluster is the in-cluster (server=https://kubernetes.default.svc, name=in-cluster)
// 2. A custom cluster has the same server as in-cluster but a different name
if destCluster.Server == cluster.Server || (destCluster.Name != "" && cluster.Name != "" && destCluster.Name == cluster.Name) {
matchingCluster = true
break
}

if destCluster.Name != cluster.Name {
continue
}

matchingCluster = true
break
}

if !matchingCluster {
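The rewritten check accepts a destination when either the API server URL or the cluster name matches a known cluster, instead of requiring both to line up. A compact sketch of that predicate, including the in-cluster case from the comment above; the `cluster` struct and the `destinationMatches` name are simplified stand-ins for the real v1alpha1 types.

```go
package main

import "fmt"

// cluster mirrors only the fields the matching logic cares about; the real code
// works with the cluster list returned by utils.ListClusters.
type cluster struct{ Server, Name string }

// destinationMatches reports whether dest refers to any known cluster, matching
// on server URL or, when both sides carry one, on cluster name.
func destinationMatches(dest cluster, known []cluster) bool {
	for _, c := range known {
		if dest.Server == c.Server || (dest.Name != "" && c.Name != "" && dest.Name == c.Name) {
			return true
		}
	}
	return false
}

func main() {
	known := []cluster{{Server: "https://kubernetes.default.svc", Name: "in-cluster"}}
	fmt.Println(destinationMatches(cluster{Name: "in-cluster"}, known))      // true: name matches
	fmt.Println(destinationMatches(cluster{Server: "https://other"}, known)) // false: nothing matches
}
```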
@@ -1031,12 +1047,10 @@ func labelMatchedExpression(logCtx *log.Entry, val string, matchExpression argov
// if operator == NotIn, default to true
valueMatched := matchExpression.Operator == "NotIn"

for _, value := range matchExpression.Values {
if val == value {
// first "In" match returns true
// first "NotIn" match returns false
return matchExpression.Operator == "In"
}
if slices.Contains(matchExpression.Values, val) {
// first "In" match returns true
// first "NotIn" match returns false
return matchExpression.Operator == "In"
}
return valueMatched
}

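Switching to `slices.Contains` preserves the In/NotIn semantics: a hit returns true for `In` and false for `NotIn`, and a miss falls back to the operator default computed above. A minimal sketch of that truth table (requires Go 1.21+ for the `slices` package); `matches` is an illustrative name.

```go
package main

import (
	"fmt"
	"slices"
)

// matches evaluates a label value against an In/NotIn match expression.
// For "In" the value must appear in values; for "NotIn" it must not.
func matches(operator, val string, values []string) bool {
	if slices.Contains(values, val) {
		return operator == "In"
	}
	// no value matched: NotIn succeeds, In fails
	return operator == "NotIn"
}

func main() {
	fmt.Println(matches("In", "dev", []string{"dev", "prod"}))    // true
	fmt.Println(matches("NotIn", "dev", []string{"dev", "prod"})) // false
}
```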
@@ -1,6 +1,7 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
crtclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -26,8 +28,8 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"github.com/argoproj/argo-cd/gitops-engine/pkg/health"
|
||||
"github.com/argoproj/argo-cd/gitops-engine/pkg/sync/common"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/generators/mocks"
|
||||
@@ -588,6 +590,72 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ensure that hydrate annotation is preserved from an existing app",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
Annotations: map[string]string{
|
||||
"annot-key": "annot-value",
|
||||
v1alpha1.AnnotationKeyHydrate: string(v1alpha1.RefreshTypeNormal),
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: application.ApplicationKind,
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "3",
|
||||
Annotations: map[string]string{
|
||||
v1alpha1.AnnotationKeyHydrate: string(v1alpha1.RefreshTypeNormal),
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ensure that configured preserved annotations are preserved from an existing app",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
@@ -1010,7 +1078,7 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Ensure that argocd post-delete finalizers are preserved from an existing app",
|
||||
name: "Ensure that argocd pre-delete and post-delete finalizers are preserved from an existing app",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
@@ -1035,8 +1103,11 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
Finalizers: []string{
|
||||
"non-argo-finalizer",
|
||||
v1alpha1.PreDeleteFinalizerName,
|
||||
v1alpha1.PreDeleteFinalizerName + "/stage1",
|
||||
v1alpha1.PostDeleteFinalizerName,
|
||||
v1alpha1.PostDeleteFinalizerName + "/mystage",
|
||||
v1alpha1.PostDeleteFinalizerName + "/stage2",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
@@ -1064,10 +1135,12 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
ResourceVersion: "3",
|
||||
Finalizers: []string{
|
||||
v1alpha1.PreDeleteFinalizerName,
|
||||
v1alpha1.PreDeleteFinalizerName + "/stage1",
|
||||
v1alpha1.PostDeleteFinalizerName,
|
||||
v1alpha1.PostDeleteFinalizerName + "/mystage",
|
||||
v1alpha1.PostDeleteFinalizerName + "/stage2",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
@@ -1117,6 +1190,8 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, c := range []struct {
|
||||
// name is human-readable test name
|
||||
@@ -1173,9 +1248,6 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
initObjs := []crtclient.Object{&app, &appSet}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-secret",
|
||||
@@ -1193,11 +1265,23 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
initObjs := []crtclient.Object{&app, &appSet, secret}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
|
||||
objects := append([]runtime.Object{}, secret)
|
||||
kubeclientset := kubefake.NewSimpleClientset(objects...)
|
||||
kubeclientset := kubefake.NewClientset(objects...)
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), kubeclientset, "argocd")
|
||||
// Initialize the settings manager to ensure cluster cache is ready
|
||||
_ = settingsMgr.ResyncInformers()
|
||||
argodb := db.NewDB("argocd", settingsMgr, kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "namespace")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
@@ -1207,7 +1291,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
Metrics: metrics,
|
||||
ArgoDB: argodb,
|
||||
}
|
||||
clusterList, err := utils.ListClusters(t.Context(), kubeclientset, "namespace")
|
||||
clusterList, err := utils.ListClusters(clusterInformer)
|
||||
require.NoError(t, err)
|
||||
|
||||
appLog := log.WithFields(applog.GetAppLogFields(&app)).WithField("appSet", "")
|
||||
@@ -1224,7 +1308,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
// App on the cluster should have the expected finalizers
|
||||
assert.ElementsMatch(t, c.expectedFinalizers, retrievedApp.Finalizers)
|
||||
|
||||
// App object passed in as a parameter should have the expected finaliers
|
||||
// App object passed in as a parameter should have the expected finalizers
|
||||
assert.ElementsMatch(t, c.expectedFinalizers, appInputParam.Finalizers)
|
||||
|
||||
bytes, _ := json.MarshalIndent(retrievedApp, "", " ")
|
||||
@@ -1237,6 +1321,8 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, c := range []struct {
|
||||
// name is human-readable test name
|
||||
@@ -1329,9 +1415,6 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
initObjs := []crtclient.Object{&app, &appSet}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-secret",
|
||||
@@ -1349,10 +1432,22 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
initObjs := []crtclient.Object{&app, &appSet, secret}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
|
||||
kubeclientset := getDefaultTestClientSet(secret)
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
settingsMgr := settings.NewSettingsManager(t.Context(), kubeclientset, "argocd")
|
||||
// Initialize the settings manager to ensure cluster cache is ready
|
||||
_ = settingsMgr.ResyncInformers()
|
||||
argodb := db.NewDB("argocd", settingsMgr, kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
@@ -1363,7 +1458,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
ArgoDB: argodb,
|
||||
}
|
||||
|
||||
clusterList, err := utils.ListClusters(t.Context(), kubeclientset, "argocd")
|
||||
clusterList, err := utils.ListClusters(clusterInformer)
|
||||
require.NoError(t, err)
|
||||
|
||||
appLog := log.WithFields(applog.GetAppLogFields(&app)).WithField("appSet", "")
|
||||
@@ -1668,6 +1763,8 @@ func TestDeleteInCluster(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, c := range []struct {
|
||||
// appSet is the application set on which the delete function is called
|
||||
@@ -1780,12 +1877,19 @@ func TestDeleteInCluster(t *testing.T) {
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
kubeclientset := kubefake.NewClientset()
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "namespace")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)),
|
||||
KubeClientset: kubefake.NewSimpleClientset(),
|
||||
Metrics: metrics,
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)),
|
||||
KubeClientset: kubeclientset,
|
||||
Metrics: metrics,
|
||||
ClusterInformer: clusterInformer,
|
||||
}
|
||||
|
||||
err = r.deleteInCluster(t.Context(), log.NewEntry(log.StandardLogger()), c.appSet, c.desiredApps)
|
||||
@@ -1937,7 +2041,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
Server: "*",
|
||||
},
|
||||
},
|
||||
ClusterResourceWhitelist: []metav1.GroupKind{
|
||||
ClusterResourceWhitelist: []v1alpha1.ClusterResourceRestrictionItem{
|
||||
{
|
||||
Group: "*",
|
||||
Kind: "*",
|
||||
@@ -2116,6 +2220,8 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
project := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"},
|
||||
@@ -2159,6 +2265,9 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -2172,6 +2281,7 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
Policy: v1alpha1.ApplicationsSyncPolicySync,
|
||||
ArgoCDNamespace: "argocd",
|
||||
Metrics: metrics,
|
||||
ClusterInformer: clusterInformer,
|
||||
}
|
||||
|
||||
req := ctrl.Request{
|
||||
@@ -2202,7 +2312,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
someTime := &metav1.Time{Time: time.Now().Add(-5 * time.Minute)}
|
||||
existingParameterGeneratedCondition := getParametersGeneratedCondition(true, "")
|
||||
existingParameterGeneratedCondition.LastTransitionTime = someTime
|
||||
@@ -2673,6 +2783,8 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
defaultProject := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
@@ -2729,10 +2841,14 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
|
||||
kubeclientset := getDefaultTestClientSet(secret)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject, secret).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
@@ -2748,6 +2864,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
Policy: v1alpha1.ApplicationsSyncPolicySync,
|
||||
EnablePolicyOverride: allowPolicyOverride,
|
||||
Metrics: metrics,
|
||||
ClusterInformer: clusterInformer,
|
||||
}
|
||||
|
||||
req := ctrl.Request{
|
||||
@@ -2848,6 +2965,8 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
defaultProject := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
@@ -2904,11 +3023,16 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
|
||||
kubeclientset := getDefaultTestClientSet(secret)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet, &defaultProject, secret).WithStatusSubresource(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -2923,6 +3047,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
Policy: v1alpha1.ApplicationsSyncPolicySync,
|
||||
EnablePolicyOverride: allowPolicyOverride,
|
||||
Metrics: metrics,
|
||||
ClusterInformer: clusterInformer,
|
||||
}
|
||||
|
||||
req := ctrl.Request{
|
||||
@@ -3015,6 +3140,8 @@ func TestPolicies(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
err = corev1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
defaultProject := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
@@ -3098,6 +3225,11 @@ func TestPolicies(t *testing.T) {
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
@@ -3110,6 +3242,7 @@ func TestPolicies(t *testing.T) {
|
||||
ArgoCDNamespace: "argocd",
|
||||
KubeClientset: kubeclientset,
|
||||
Policy: policy,
|
||||
ClusterInformer: clusterInformer,
|
||||
Metrics: metrics,
|
||||
}
|
||||
|
||||
@@ -3119,27 +3252,27 @@ func TestPolicies(t *testing.T) {
|
||||
Name: "name",
|
||||
},
|
||||
}
|
||||
|
||||
// Check if Application is created
|
||||
res, err := r.Reconcile(t.Context(), req)
|
||||
ctx := t.Context()
|
||||
// Check if the application is created
|
||||
res, err := r.Reconcile(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, time.Duration(0), res.RequeueAfter)
|
||||
|
||||
var app v1alpha1.Application
|
||||
err = r.Get(t.Context(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
err = r.Get(ctx, crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "value", app.Annotations["key"])
|
||||
|
||||
// Check if Application is updated
|
||||
// Check if the Application is updated
|
||||
app.Annotations["key"] = "edited"
|
||||
err = r.Update(t.Context(), &app)
|
||||
err = r.Update(ctx, &app)
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err = r.Reconcile(t.Context(), req)
|
||||
res, err = r.Reconcile(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, time.Duration(0), res.RequeueAfter)
|
||||
|
||||
err = r.Get(t.Context(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
err = r.Get(ctx, crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
require.NoError(t, err)
|
||||
|
||||
if c.allowedUpdate {
|
||||
@@ -3148,22 +3281,22 @@ func TestPolicies(t *testing.T) {
|
||||
assert.Equal(t, "edited", app.Annotations["key"])
|
||||
}
|
||||
|
||||
// Check if Application is deleted
|
||||
err = r.Get(t.Context(), crtclient.ObjectKey{Namespace: "argocd", Name: "name"}, &appSet)
|
||||
// Check if the Application is deleted
|
||||
err = r.Get(ctx, crtclient.ObjectKey{Namespace: "argocd", Name: "name"}, &appSet)
|
||||
require.NoError(t, err)
|
||||
appSet.Spec.Generators[0] = v1alpha1.ApplicationSetGenerator{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{},
|
||||
},
|
||||
}
|
||||
err = r.Update(t.Context(), &appSet)
|
||||
err = r.Update(ctx, &appSet)
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err = r.Reconcile(t.Context(), req)
|
||||
res, err = r.Reconcile(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, time.Duration(0), res.RequeueAfter)
|
||||
|
||||
err = r.Get(t.Context(), crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
err = r.Get(ctx, crtclient.ObjectKey{Namespace: "argocd", Name: "my-app"}, &app)
|
||||
require.NoError(t, err)
|
||||
if c.allowedDelete {
|
||||
assert.NotNil(t, app.DeletionTimestamp)
|
||||
@@ -3179,7 +3312,7 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
for _, cc := range []struct {
|
||||
name string
|
||||
@@ -4056,7 +4189,7 @@ func TestBuildAppDependencyList(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
@@ -4491,7 +4624,7 @@ func TestGetAppsToSync(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
@@ -4519,7 +4652,7 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
|
||||
|
||||
newDefaultAppSet := func(stepsCount int, status []v1alpha1.ApplicationSetApplicationStatus) v1alpha1.ApplicationSet {
|
||||
steps := []v1alpha1.ApplicationSetRolloutStep{}
|
||||
for i := 0; i < stepsCount; i++ {
|
||||
for range stepsCount {
|
||||
steps = append(steps, v1alpha1.ApplicationSetRolloutStep{MatchExpressions: []v1alpha1.ApplicationMatchExpression{}})
|
||||
}
|
||||
return v1alpha1.ApplicationSet{
|
||||
@@ -5178,7 +5311,7 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
@@ -5931,7 +6064,7 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&cc.appSet).WithStatusSubresource(&cc.appSet).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
@@ -6204,7 +6337,7 @@ func TestUpdateResourceStatus(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&cc.appSet).WithObjects(&cc.appSet).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
@@ -6232,7 +6365,7 @@ func TestUpdateResourceStatus(t *testing.T) {
|
||||
|
||||
func generateNAppResourceStatuses(n int) []v1alpha1.ResourceStatus {
|
||||
var r []v1alpha1.ResourceStatus
|
||||
for i := 0; i < n; i++ {
|
||||
for i := range n {
|
||||
r = append(r, v1alpha1.ResourceStatus{
|
||||
Name: "app" + strconv.Itoa(i),
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
@@ -6247,7 +6380,7 @@ func generateNAppResourceStatuses(n int) []v1alpha1.ResourceStatus {
|
||||
|
||||
func generateNHealthyApps(n int) []v1alpha1.Application {
|
||||
var r []v1alpha1.Application
|
||||
for i := 0; i < n; i++ {
|
||||
for i := range n {
|
||||
r = append(r, v1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app" + strconv.Itoa(i),
|
||||
@@ -6294,7 +6427,7 @@ func TestResourceStatusAreOrdered(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&cc.appSet).WithObjects(&cc.appSet).Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
@@ -7277,12 +7410,229 @@ func TestIsRollingSyncDeletionReversed(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconcileAddsFinalizer_WhenDeletionOrderReverse(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
for _, cc := range []struct {
|
||||
name string
|
||||
appSet v1alpha1.ApplicationSet
|
||||
progressiveSyncEnabled bool
|
||||
expectedFinalizers []string
|
||||
}{
|
||||
{
|
||||
name: "adds finalizer when DeletionOrder is Reverse",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-appset",
|
||||
Namespace: "argocd",
|
||||
// No finalizers initially
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "env",
|
||||
Operator: "In",
|
||||
Values: []string{"dev"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
DeletionOrder: ReverseDeletionOrder,
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
progressiveSyncEnabled: true,
|
||||
expectedFinalizers: []string{v1alpha1.ResourcesFinalizerName},
|
||||
},
|
||||
{
|
||||
name: "does not add finalizer when already exists and DeletionOrder is Reverse",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-appset",
|
||||
Namespace: "argocd",
|
||||
Finalizers: []string{
|
||||
v1alpha1.ResourcesFinalizerName,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "env",
|
||||
Operator: "In",
|
||||
Values: []string{"dev"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
DeletionOrder: ReverseDeletionOrder,
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
progressiveSyncEnabled: true,
|
||||
expectedFinalizers: []string{v1alpha1.ResourcesFinalizerName},
|
||||
},
|
||||
{
|
||||
name: "does not add finalizer when DeletionOrder is AllAtOnce",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-appset",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "env",
|
||||
Operator: "In",
|
||||
Values: []string{"dev"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
DeletionOrder: AllAtOnceDeletionOrder,
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
progressiveSyncEnabled: true,
|
||||
expectedFinalizers: nil,
|
||||
},
|
||||
{
|
||||
name: "does not add finalizer when DeletionOrder is not set",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-appset",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "env",
|
||||
Operator: "In",
|
||||
Values: []string{"dev"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
progressiveSyncEnabled: true,
|
||||
expectedFinalizers: nil,
|
||||
},
|
||||
{
|
||||
name: "does not add finalizer when progressive sync not enabled",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-appset",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Strategy: &v1alpha1.ApplicationSetStrategy{
|
||||
Type: "RollingSync",
|
||||
RollingSync: &v1alpha1.ApplicationSetRolloutStrategy{
|
||||
Steps: []v1alpha1.ApplicationSetRolloutStep{
|
||||
{
|
||||
MatchExpressions: []v1alpha1.ApplicationMatchExpression{
|
||||
{
|
||||
Key: "env",
|
||||
Operator: "In",
|
||||
Values: []string{"dev"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
DeletionOrder: ReverseDeletionOrder,
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
},
|
||||
progressiveSyncEnabled: false,
|
||||
expectedFinalizers: nil,
|
||||
},
|
||||
} {
|
||||
t.Run(cc.name, func(t *testing.T) {
|
||||
client := fake.NewClientBuilder().
|
||||
WithScheme(scheme).
|
||||
WithObjects(&cc.appSet).
|
||||
WithStatusSubresource(&cc.appSet).
|
||||
WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).
|
||||
Build()
|
||||
metrics := appsetmetrics.NewFakeAppsetMetrics()
|
||||
argodb := db.NewDB("argocd", settings.NewSettingsManager(t.Context(), kubeclientset, "argocd"), kubeclientset)
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Generators: map[string]generators.Generator{},
|
||||
ArgoDB: argodb,
|
||||
KubeClientset: kubeclientset,
|
||||
Metrics: metrics,
|
||||
EnableProgressiveSyncs: cc.progressiveSyncEnabled,
|
||||
}
|
||||
|
||||
req := ctrl.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Namespace: cc.appSet.Namespace,
|
||||
Name: cc.appSet.Name,
|
||||
},
|
||||
}
|
||||
|
||||
// Run reconciliation
|
||||
_, err = r.Reconcile(t.Context(), req)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Fetch the updated ApplicationSet
|
||||
var updatedAppSet v1alpha1.ApplicationSet
|
||||
err = r.Get(t.Context(), req.NamespacedName, &updatedAppSet)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the finalizers
|
||||
assert.Equal(t, cc.expectedFinalizers, updatedAppSet.Finalizers,
|
||||
"finalizers should match expected value")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconcileProgressiveSyncDisabled(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
require.NoError(t, err)
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset([]runtime.Object{}...)
|
||||
kubeclientset := kubefake.NewClientset([]runtime.Object{}...)
|
||||
|
||||
for _, cc := range []struct {
|
||||
name string
|
||||
@@ -7355,3 +7705,14 @@ func TestReconcileProgressiveSyncDisabled(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func startAndSyncInformer(t *testing.T, informer cache.SharedIndexInformer) context.CancelFunc {
t.Helper()
ctx, cancel := context.WithCancel(t.Context())
go informer.Run(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
cancel()
t.Fatal("Timed out waiting for caches to sync")
}
return cancel
}

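The helper returns the informer's cancel function, so the tests above can start the informer, wait for its cache to sync, and schedule shutdown in one `defer startAndSyncInformer(t, clusterInformer)()` line. A usage sketch mirroring that test setup; the test name is hypothetical and the body is elided.

```go
func TestSomethingWithClusters(t *testing.T) {
	kubeclientset := kubefake.NewClientset()
	clusterInformer, err := settings.NewClusterInformer(kubeclientset, "argocd")
	require.NoError(t, err)

	// Start the informer, block until its cache has synced, and stop it when
	// the test returns.
	defer startAndSyncInformer(t, clusterInformer)()

	// ... exercise code that reads clusters through clusterInformer ...
}
```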
@@ -14,6 +14,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
@@ -22,8 +23,9 @@ import (
|
||||
// requeue any related ApplicationSets.
|
||||
type clusterSecretEventHandler struct {
|
||||
// handler.EnqueueRequestForOwner
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
Log log.FieldLogger
|
||||
Client client.Client
|
||||
ApplicationSetNamespaces []string
|
||||
}
|
||||
|
||||
func (h *clusterSecretEventHandler) Create(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
|
||||
@@ -68,6 +70,10 @@ func (h *clusterSecretEventHandler) queueRelatedAppGenerators(ctx context.Contex
|
||||
|
||||
h.Log.WithField("count", len(appSetList.Items)).Info("listed ApplicationSets")
|
||||
for _, appSet := range appSetList.Items {
|
||||
if !utils.IsNamespaceAllowed(h.ApplicationSetNamespaces, appSet.GetNamespace()) {
|
||||
// Ignore it as not part of the allowed list of namespaces in which to watch Appsets
|
||||
continue
|
||||
}
|
||||
foundClusterGenerator := false
|
||||
for _, generator := range appSet.Spec.Generators {
|
||||
if generator.Clusters != nil {
|
||||
|
||||
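The added guard skips ApplicationSets that live outside the configured namespace allow-list before their generators are inspected, so cluster-secret events no longer enqueue reconciles for namespaces such as `my-namespace-not-allowed` in the test case below. A simplified sketch of an allow-list check of this shape; the real helper is `utils.IsNamespaceAllowed`, and the exact-match plus empty-list-allows-all behaviour here is an assumption for illustration.

```go
package main

import "fmt"

// namespaceAllowed is a simplified stand-in for utils.IsNamespaceAllowed: it
// allows everything when the list is empty and otherwise requires an exact
// match. (Assumption for illustration; the real helper may be more permissive,
// e.g. pattern-based.)
func namespaceAllowed(allowed []string, namespace string) bool {
	if len(allowed) == 0 {
		return true
	}
	for _, ns := range allowed {
		if ns == namespace {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(namespaceAllowed([]string{"argocd"}, "argocd"))                   // true: enqueue
	fmt.Println(namespaceAllowed([]string{"argocd"}, "my-namespace-not-allowed")) // false: skip
}
```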
@@ -137,7 +137,7 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "another-namespace",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
@@ -171,9 +171,37 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{
|
||||
{NamespacedName: types.NamespacedName{Namespace: "another-namespace", Name: "my-app-set"}},
|
||||
{NamespacedName: types.NamespacedName{Namespace: "argocd", Name: "my-app-set"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cluster generators in other namespaces should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-app-set",
|
||||
Namespace: "my-namespace-not-allowed",
|
||||
},
|
||||
Spec: argov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argov1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Clusters: &argov1alpha1.ClusterGenerator{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
secret: corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "argocd",
|
||||
Name: "my-secret",
|
||||
Labels: map[string]string{
|
||||
argocommon.LabelKeySecretType: argocommon.LabelValueSecretTypeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRequests: []reconcile.Request{},
|
||||
},
|
||||
{
|
||||
name: "non-argo cd secret should not match",
|
||||
items: []argov1alpha1.ApplicationSet{
|
||||
@@ -552,8 +580,9 @@ func TestClusterEventHandler(t *testing.T) {
|
||||
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithLists(&appSetList).Build()
|
||||
|
||||
handler := &clusterSecretEventHandler{
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
Client: fakeClient,
|
||||
Log: log.WithField("type", "createSecretEventHandler"),
|
||||
ApplicationSetNamespaces: []string{"argocd"},
|
||||
}
|
||||
|
||||
mockAddRateLimitingInterface := mockAddRateLimitingInterface{}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
appsetmetrics "github.com/argoproj/argo-cd/v3/applicationset/metrics"
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services/mocks"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
func TestRequeueAfter(t *testing.T) {
|
||||
@@ -57,12 +58,17 @@ func TestRequeueAfter(t *testing.T) {
|
||||
}
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)
|
||||
scmConfig := generators.NewSCMConfig("", []string{""}, true, true, nil, true)
|
||||
clusterInformer, err := settings.NewClusterInformer(appClientset, "argocd")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer startAndSyncInformer(t, clusterInformer)()
|
||||
|
||||
terminalGenerators := map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(ctx, k8sClient, appClientset, "argocd"),
|
||||
"Clusters": generators.NewClusterGenerator(k8sClient, "argocd"),
|
||||
"Git": generators.NewGitGenerator(mockServer, "namespace"),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), scmConfig),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd", clusterInformer),
|
||||
"PullRequest": generators.NewPullRequestGenerator(k8sClient, scmConfig),
|
||||
}
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
@@ -22,19 +21,15 @@ var _ Generator = (*ClusterGenerator)(nil)
|
||||
// ClusterGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type ClusterGenerator struct {
|
||||
client.Client
|
||||
ctx context.Context
|
||||
clientset kubernetes.Interface
|
||||
// namespace is the Argo CD namespace
|
||||
namespace string
|
||||
}
|
||||
|
||||
var render = &utils.Render{}
|
||||
|
||||
func NewClusterGenerator(ctx context.Context, c client.Client, clientset kubernetes.Interface, namespace string) Generator {
|
||||
func NewClusterGenerator(c client.Client, namespace string) Generator {
|
||||
g := &ClusterGenerator{
|
||||
Client: c,
|
||||
ctx: ctx,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
}
|
||||
return g
|
||||
@@ -64,16 +59,7 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
// - Since local clusters do not have secrets, they do not have labels to match against
|
||||
ignoreLocalClusters := len(appSetGenerator.Clusters.Selector.MatchExpressions) > 0 || len(appSetGenerator.Clusters.Selector.MatchLabels) > 0
|
||||
|
||||
// ListCluster will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get cluster secrets using the cached controller-runtime client
|
||||
clusterSecrets, err := g.getSecretsByClusterName(logCtx, appSetGenerator)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting cluster secrets: %w", err)
|
||||
@@ -82,32 +68,14 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
paramHolder := ¶mHolder{isFlatMode: appSetGenerator.Clusters.FlatList}
|
||||
logCtx.Debugf("Using flat mode = %t for cluster generator", paramHolder.isFlatMode)
|
||||
|
||||
secretsFound := []corev1.Secret{}
|
||||
for _, cluster := range clustersFromArgoCD {
|
||||
// If there is a secret for this cluster, then it's a non-local cluster, so it will be
|
||||
// handled by the next step.
|
||||
if secretForCluster, exists := clusterSecrets[cluster.Name]; exists {
|
||||
secretsFound = append(secretsFound, secretForCluster)
|
||||
} else if !ignoreLocalClusters {
|
||||
// If there is no secret for the cluster, it's the local cluster, so handle it here.
|
||||
params := map[string]any{}
|
||||
params["name"] = cluster.Name
|
||||
params["nameNormalized"] = cluster.Name
|
||||
params["server"] = cluster.Server
|
||||
params["project"] = ""
|
||||
|
||||
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
|
||||
}
|
||||
|
||||
paramHolder.append(params)
|
||||
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
}
|
||||
// Convert map values to slice to check for an in-cluster secret
|
||||
secretsList := make([]corev1.Secret, 0, len(clusterSecrets))
|
||||
for _, secret := range clusterSecrets {
|
||||
secretsList = append(secretsList, secret)
|
||||
}
|
||||
|
||||
// For each matching cluster secret (non-local clusters only)
|
||||
for _, cluster := range secretsFound {
|
||||
for _, cluster := range clusterSecrets {
|
||||
params := g.getClusterParameters(cluster, appSet)
|
||||
|
||||
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
|
||||
@@ -119,6 +87,23 @@ func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.Ap
|
||||
logCtx.WithField("cluster", cluster.Name).Debug("matched cluster secret")
|
||||
}
|
||||
|
||||
// Add the in-cluster last if it doesn't have a secret, and we're not ignoring in-cluster
|
||||
if !ignoreLocalClusters && !utils.SecretsContainInClusterCredentials(secretsList) {
|
||||
params := map[string]any{}
|
||||
params["name"] = argoappsetv1alpha1.KubernetesInClusterName
|
||||
params["nameNormalized"] = argoappsetv1alpha1.KubernetesInClusterName
|
||||
params["server"] = argoappsetv1alpha1.KubernetesInternalAPIServerAddr
|
||||
params["project"] = ""
|
||||
|
||||
err = appendTemplatedValues(appSetGenerator.Clusters.Values, params, appSet.Spec.GoTemplate, appSet.Spec.GoTemplateOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error appending templated values for local cluster: %w", err)
|
||||
}
|
||||
|
||||
paramHolder.append(params)
|
||||
logCtx.WithField("cluster", "local cluster").Info("matched local cluster")
|
||||
}
|
||||
|
||||
return paramHolder.consolidate(), nil
|
||||
}
|
||||
|
||||
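With cluster secrets as the single source, the generator emits one parameter set per matching secret and appends the implicit in-cluster entry last, and only when no secret already carries in-cluster credentials and local clusters are not being filtered out. A small sketch of that ordering decision; `clusterParams`, the two constants, and the server-URL comparison standing in for `utils.SecretsContainInClusterCredentials` are simplifications of my own.

```go
package main

import "fmt"

const (
	inClusterName   = "in-cluster"
	inClusterServer = "https://kubernetes.default.svc"
)

// clusterParams builds one params map per cluster secret and, unless the caller
// ignores local clusters or a secret already points at the in-cluster server,
// appends the implicit in-cluster entry last. The map keys mirror the params
// set in GenerateParams.
func clusterParams(secretServers map[string]string, ignoreLocal bool) []map[string]any {
	var params []map[string]any
	haveInCluster := false
	for name, server := range secretServers {
		if server == inClusterServer {
			haveInCluster = true
		}
		params = append(params, map[string]any{"name": name, "nameNormalized": name, "server": server, "project": ""})
	}
	if !ignoreLocal && !haveInCluster {
		params = append(params, map[string]any{"name": inClusterName, "nameNormalized": inClusterName, "server": inClusterServer, "project": ""})
	}
	return params
}

func main() {
	fmt.Println(len(clusterParams(map[string]string{"staging": "https://1.2.3.4"}, false))) // 2: staging + in-cluster
}
```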
@@ -186,7 +171,7 @@ func (g *ClusterGenerator) getSecretsByClusterName(log *log.Entry, appSetGenerat
|
||||
return nil, fmt.Errorf("error converting label selector: %w", err)
|
||||
}
|
||||
|
||||
if err := g.List(context.Background(), clusterSecretList, client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
|
||||
if err := g.List(context.Background(), clusterSecretList, client.InNamespace(g.namespace), client.MatchingLabelsSelector{Selector: secretSelector}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("clusters matching labels: %d", len(clusterSecretList.Items))
|
||||
|
||||
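Adding `client.InNamespace(g.namespace)` scopes the secret query to the Argo CD namespace instead of listing cluster secrets across the whole cluster. A hedged sketch of that namespaced, label-filtered List with controller-runtime; `listClusterSecrets` is an illustrative helper, whereas the generator does this inline.

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listClusterSecrets restricts the query to one namespace and to the given
// label selector, matching the tightened List call above.
func listClusterSecrets(ctx context.Context, c client.Client, namespace string, selector client.MatchingLabelsSelector) (*corev1.SecretList, error) {
	secrets := &corev1.SecretList{}
	if err := c.List(ctx, secrets, client.InNamespace(namespace), selector); err != nil {
		return nil, err
	}
	return secrets, nil
}
```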
@@ -7,12 +7,9 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
|
||||
@@ -299,23 +296,15 @@ func TestGenerateParams(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
|
||||
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")
|
||||
clusterGenerator := NewClusterGenerator(cl, "namespace")
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -336,12 +325,25 @@ func TestGenerateParams(t *testing.T) {
|
||||
require.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
assertEqualParamsFlat(t, testCase.expected, got, testCase.isFlatMode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func assertEqualParamsFlat(t *testing.T, expected, got []map[string]any, isFlatMode bool) {
|
||||
t.Helper()
|
||||
if isFlatMode && len(expected) == 1 && len(got) == 1 {
|
||||
expectedClusters, ok1 := expected[0]["clusters"].([]map[string]any)
|
||||
gotClusters, ok2 := got[0]["clusters"].([]map[string]any)
|
||||
if ok1 && ok2 {
|
||||
assert.ElementsMatch(t, expectedClusters, gotClusters)
|
||||
return
|
||||
}
|
||||
}
|
||||
assert.ElementsMatch(t, expected, got)
|
||||
}
|
||||
|
||||
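In flat mode the generator returns a single params map whose `clusters` slice carries every cluster, and its order is not guaranteed, so the helper above compares that inner slice with `ElementsMatch` rather than requiring identical ordering. For example, these two results are treated as equal (values trimmed to a `name` field for brevity):

```go
expected := []map[string]any{{"clusters": []map[string]any{{"name": "staging"}, {"name": "production"}}}}
got := []map[string]any{{"clusters": []map[string]any{{"name": "production"}, {"name": "staging"}}}}
assertEqualParamsFlat(t, expected, got, true) // passes: same clusters, different order
```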
func TestGenerateParamsGoTemplate(t *testing.T) {
|
||||
clusters := []client.Object{
|
||||
&corev1.Secret{
|
||||
@@ -837,23 +839,15 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
|
||||
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")
|
||||
clusterGenerator := NewClusterGenerator(cl, "namespace")
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -876,7 +870,7 @@ func TestGenerateParamsGoTemplate(t *testing.T) {
|
||||
require.EqualError(t, err, testCase.expectedError.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.ElementsMatch(t, testCase.expected, got)
|
||||
assertEqualParamsFlat(t, testCase.expected, got, testCase.isFlatMode)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -19,24 +19,27 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
var _ Generator = (*DuckTypeGenerator)(nil)
|
||||
|
||||
// DuckTypeGenerator generates Applications for some or all clusters registered with ArgoCD.
|
||||
type DuckTypeGenerator struct {
|
||||
ctx context.Context
|
||||
dynClient dynamic.Interface
|
||||
clientset kubernetes.Interface
|
||||
namespace string // namespace is the Argo CD namespace
|
||||
ctx context.Context
|
||||
dynClient dynamic.Interface
|
||||
clientset kubernetes.Interface
|
||||
namespace string // namespace is the Argo CD namespace
|
||||
clusterInformer *settings.ClusterInformer
|
||||
}
|
||||
|
||||
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string) Generator {
|
||||
func NewDuckTypeGenerator(ctx context.Context, dynClient dynamic.Interface, clientset kubernetes.Interface, namespace string, clusterInformer *settings.ClusterInformer) Generator {
|
||||
g := &DuckTypeGenerator{
|
||||
ctx: ctx,
|
||||
dynClient: dynClient,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
ctx: ctx,
|
||||
dynClient: dynClient,
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
clusterInformer: clusterInformer,
|
||||
}
|
||||
return g
|
||||
}
|
||||
@@ -65,8 +68,7 @@ func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.A
|
||||
return nil, ErrEmptyAppSetGenerator
|
||||
}
|
||||
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.clusterInformer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
}
|
||||
|
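The hunks above replace direct Secret listing with a shared cluster informer, so callers now have to build and start that informer before constructing the generator. A minimal wiring sketch, reusing only helpers that appear elsewhere in this diff (settings.NewClusterInformer, cache.WaitForCacheSync); the names ctx, k8sClient, dynClient and the "argocd" namespace are illustrative assumptions, not part of the change:

// Sketch only; assumes it runs inside a setup function that can return an error.
clusterInformer, err := settings.NewClusterInformer(k8sClient, "argocd")
if err != nil {
    return fmt.Errorf("failed to create cluster informer: %w", err)
}
go clusterInformer.Run(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), clusterInformer.HasSynced) {
    return errors.New("timed out waiting for the cluster informer to sync")
}
generator := NewDuckTypeGenerator(ctx, dynClient, k8sClient, "argocd", clusterInformer)
_ = generator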
||||
@@ -11,11 +11,13 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
dynfake "k8s.io/client-go/dynamic/fake"
|
||||
"k8s.io/client-go/dynamic/fake"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/test"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -290,9 +292,14 @@ func TestGenerateParamsForDuckType(t *testing.T) {
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
fakeDynClient := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
|
||||
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace")
|
||||
clusterInformer, err := settings.NewClusterInformer(appClientset, "namespace")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer test.StartInformer(clusterInformer)()
|
||||
|
||||
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace", clusterInformer)
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -586,9 +593,14 @@ func TestGenerateParamsForDuckTypeGoTemplate(t *testing.T) {
|
||||
Resource: "ducks",
|
||||
}: "DuckList"}
|
||||
|
||||
fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
fakeDynClient := fake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, testCase.resource)
|
||||
|
||||
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace")
|
||||
clusterInformer, err := settings.NewClusterInformer(appClientset, "namespace")
|
||||
require.NoError(t, err)
|
||||
|
||||
defer test.StartInformer(clusterInformer)()
|
||||
|
||||
duckTypeGenerator := NewDuckTypeGenerator(t.Context(), fakeDynClient, appClientset, "namespace", clusterInformer)
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -16,8 +15,6 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
crtclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
@@ -223,7 +220,7 @@ func TestTransForm(t *testing.T) {
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
testGenerators := map[string]Generator{
|
||||
"Clusters": getMockClusterGenerator(t.Context()),
|
||||
"Clusters": getMockClusterGenerator(),
|
||||
}
|
||||
|
||||
applicationSetInfo := argov1alpha1.ApplicationSet{
|
||||
@@ -260,7 +257,7 @@ func emptyTemplate() argov1alpha1.ApplicationSetTemplate {
|
||||
}
|
||||
}
|
||||
|
||||
func getMockClusterGenerator(ctx context.Context) Generator {
|
||||
func getMockClusterGenerator() Generator {
|
||||
clusters := []crtclient.Object{
|
||||
&corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -335,14 +332,8 @@ func getMockClusterGenerator(ctx context.Context) Generator {
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
return NewClusterGenerator(ctx, fakeClient, appClientset, "namespace")
|
||||
return NewClusterGenerator(fakeClient, "namespace")
|
||||
}
|
||||
|
||||
func getMockGitGenerator() Generator {
|
||||
@@ -354,7 +345,7 @@ func getMockGitGenerator() Generator {
|
||||
|
||||
func TestGetRelevantGenerators(t *testing.T) {
|
||||
testGenerators := map[string]Generator{
|
||||
"Clusters": getMockClusterGenerator(t.Context()),
|
||||
"Clusters": getMockClusterGenerator(),
|
||||
"Git": getMockGitGenerator(),
|
||||
}
|
||||
|
||||
@@ -551,7 +542,7 @@ func TestInterpolateGeneratorError(t *testing.T) {
|
||||
},
|
||||
useGoTemplate: true,
|
||||
goTemplateOptions: []string{},
|
||||
}, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: "failed to replace parameters in generator: failed to execute go template {{ index .rmap (default .override .test) }}: template: :1:3: executing \"\" at <index .rmap (default .override .test)>: error calling index: index of untyped nil"},
|
||||
}, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: "failed to replace parameters in generator: failed to execute go template {{ index .rmap (default .override .test) }}: template: base:1:3: executing \"base\" at <index .rmap (default .override .test)>: error calling index: index of untyped nil"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -3,6 +3,7 @@ package generators
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -168,9 +169,7 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for absPath, content := range retrievedFiles {
|
||||
fileContentMap[absPath] = content
|
||||
}
|
||||
maps.Copy(fileContentMap, retrievedFiles)
|
||||
}
|
||||
|
||||
// Now remove files matching any exclude pattern
|
||||
@@ -242,9 +241,7 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
params := map[string]any{}
|
||||
|
||||
if useGoTemplate {
|
||||
for k, v := range objectFound {
|
||||
params[k] = v
|
||||
}
|
||||
maps.Copy(params, objectFound)
|
||||
|
||||
paramPath := map[string]any{}
|
||||
|
||||
@@ -316,7 +313,7 @@ func (g *GitGenerator) filterApps(directories []argoprojiov1alpha1.GitDirectoryG
|
||||
appExclude = true
|
||||
}
|
||||
}
|
||||
// Whenever there is a path with exclude: true it wont be included, even if it is included in a different path pattern
|
||||
// Whenever there is a path with exclude: true it won't be included, even if it is included in a different path pattern
|
||||
if appInclude && !appExclude {
|
||||
res = append(res, appPath)
|
||||
}
|
||||
|
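The replacements above swap hand-written copy loops for maps.Copy from the standard library's maps package (Go 1.21+), which copies every key/value pair from the source map into the destination, overwriting existing keys. A small stand-alone illustration with made-up values:

package main

import (
    "fmt"
    "maps"
)

func main() {
    params := map[string]any{"cluster": "prod"}
    objectFound := map[string]any{"path": "apps/guestbook", "cluster": "staging"}
    // Equivalent to: for k, v := range objectFound { params[k] = v }
    maps.Copy(params, objectFound)
    fmt.Println(params) // map[cluster:staging path:apps/guestbook]
}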
||||
@@ -8,7 +8,6 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
@@ -624,11 +623,6 @@ func TestInterpolatedMatrixGenerate(t *testing.T) {
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
@@ -637,13 +631,12 @@ func TestInterpolatedMatrixGenerate(t *testing.T) {
|
||||
genMock := &generatorsMock.Generator{}
|
||||
appSet := &v1alpha1.ApplicationSet{}
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")
|
||||
clusterGenerator := NewClusterGenerator(cl, "namespace")
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := v1alpha1.ApplicationSetGenerator{
|
||||
@@ -803,11 +796,6 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
|
||||
Type: corev1.SecretType("Opaque"),
|
||||
},
|
||||
}
|
||||
// convert []client.Object to []runtime.Object, for use by kubefake package
|
||||
runtimeClusters := []runtime.Object{}
|
||||
for _, clientCluster := range clusters {
|
||||
runtimeClusters = append(runtimeClusters, clientCluster)
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCaseCopy := testCase // Since tests may run in parallel
|
||||
@@ -820,13 +808,12 @@ func TestInterpolatedMatrixGenerateGoTemplate(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
appClientset := kubefake.NewSimpleClientset(runtimeClusters...)
|
||||
fakeClient := fake.NewClientBuilder().WithObjects(clusters...).Build()
|
||||
cl := &possiblyErroringFakeCtrlRuntimeClient{
|
||||
fakeClient,
|
||||
testCase.clientError,
|
||||
}
|
||||
clusterGenerator := NewClusterGenerator(t.Context(), cl, appClientset, "namespace")
|
||||
clusterGenerator := NewClusterGenerator(cl, "namespace")
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := v1alpha1.ApplicationSetGenerator{
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -115,9 +116,7 @@ func (g *PluginGenerator) generateParams(appSetGenerator *argoprojiov1alpha1.App
|
||||
params := map[string]any{}
|
||||
|
||||
if useGoTemplate {
|
||||
for k, v := range objectFound {
|
||||
params[k] = v
|
||||
}
|
||||
maps.Copy(params, objectFound)
|
||||
} else {
|
||||
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
|
||||
if err != nil {
|
||||
|
||||
@@ -96,18 +96,12 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
var shortSHALength int
|
||||
var shortSHALength7 int
|
||||
for _, pull := range pulls {
|
||||
shortSHALength = 8
|
||||
if len(pull.HeadSHA) < 8 {
|
||||
shortSHALength = len(pull.HeadSHA)
|
||||
}
|
||||
shortSHALength = min(len(pull.HeadSHA), 8)
|
||||
|
||||
shortSHALength7 = 7
|
||||
if len(pull.HeadSHA) < 7 {
|
||||
shortSHALength7 = len(pull.HeadSHA)
|
||||
}
|
||||
shortSHALength7 = min(len(pull.HeadSHA), 7)
|
||||
|
||||
paramMap := map[string]any{
|
||||
"number": strconv.Itoa(pull.Number),
|
||||
"number": strconv.FormatInt(pull.Number, 10),
|
||||
"title": pull.Title,
|
||||
"branch": pull.Branch,
|
||||
"branch_slug": slug.Make(pull.Branch),
|
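The short-SHA truncation above now uses the built-in min (Go 1.21+) instead of an if guard; the behavior is unchanged, it simply clamps the slice length to the SHA length. An illustrative sketch with a made-up SHA:

// Sketch only, inside a loop over pull requests.
headSHA := "1a8dd249c04a"
shortSHA := headSHA[:min(len(headSHA), 8)]  // "1a8dd249"
shortSHA7 := headSHA[:min(len(headSHA), 7)] // "1a8dd24"
_, _ = shortSHA, shortSHA7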
||||
@@ -243,9 +237,9 @@ func (g *PullRequestGenerator) github(ctx context.Context, cfg *argoprojiov1alph
|
||||
}
|
||||
|
||||
if g.enableGitHubAPIMetrics {
|
||||
return pullrequest.NewGithubAppService(*auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels, httpClient)
|
||||
return pullrequest.NewGithubAppService(ctx, *auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels, httpClient)
|
||||
}
|
||||
return pullrequest.NewGithubAppService(*auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels)
|
||||
return pullrequest.NewGithubAppService(ctx, *auth, cfg.API, cfg.Owner, cfg.Repo, cfg.Labels)
|
||||
}
|
||||
|
||||
// always default to token, even if not set (public access)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -105,10 +106,8 @@ func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, g
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, allowedScmProvider := range allowedScmProviders {
|
||||
if url == allowedScmProvider {
|
||||
return nil
|
||||
}
|
||||
if slices.Contains(allowedScmProviders, url) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
@@ -244,15 +243,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
var shortSHALength int
|
||||
var shortSHALength7 int
|
||||
for _, repo := range repos {
|
||||
shortSHALength = 8
|
||||
if len(repo.SHA) < 8 {
|
||||
shortSHALength = len(repo.SHA)
|
||||
}
|
||||
shortSHALength = min(len(repo.SHA), 8)
|
||||
|
||||
shortSHALength7 = 7
|
||||
if len(repo.SHA) < 7 {
|
||||
shortSHALength7 = len(repo.SHA)
|
||||
}
|
||||
shortSHALength7 = min(len(repo.SHA), 7)
|
||||
|
||||
params := map[string]any{
|
||||
"organization": repo.Organization,
|
||||
@@ -296,9 +289,9 @@ func (g *SCMProviderGenerator) githubProvider(ctx context.Context, github *argop
|
||||
}
|
||||
|
||||
if g.enableGitHubAPIMetrics {
|
||||
return scm_provider.NewGithubAppProviderFor(*auth, github.Organization, github.API, github.AllBranches, httpClient)
|
||||
return scm_provider.NewGithubAppProviderFor(ctx, *auth, github.Organization, github.API, github.AllBranches, httpClient)
|
||||
}
|
||||
return scm_provider.NewGithubAppProviderFor(*auth, github.Organization, github.API, github.AllBranches)
|
||||
return scm_provider.NewGithubAppProviderFor(ctx, *auth, github.Organization, github.API, github.AllBranches)
|
||||
}
|
||||
|
||||
token, err := utils.GetSecretRef(ctx, g.client, github.TokenRef, applicationSetInfo.Namespace, g.tokenRefStrictMode)
|
||||
|
||||
@@ -8,15 +8,16 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig) map[string]Generator {
|
||||
func GetGenerators(ctx context.Context, c client.Client, k8sClient kubernetes.Interface, controllerNamespace string, argoCDService services.Repos, dynamicClient dynamic.Interface, scmConfig SCMConfig, clusterInformer *settings.ClusterInformer) map[string]Generator {
|
||||
terminalGenerators := map[string]Generator{
|
||||
"List": NewListGenerator(),
|
||||
"Clusters": NewClusterGenerator(ctx, c, k8sClient, controllerNamespace),
|
||||
"Clusters": NewClusterGenerator(c, controllerNamespace),
|
||||
"Git": NewGitGenerator(argoCDService, controllerNamespace),
|
||||
"SCMProvider": NewSCMProviderGenerator(c, scmConfig),
|
||||
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace),
|
||||
"ClusterDecisionResource": NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, controllerNamespace, clusterInformer),
|
||||
"PullRequest": NewPullRequestGenerator(c, scmConfig),
|
||||
"Plugin": NewPluginGenerator(c, controllerNamespace),
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"maps"
|
||||
)
|
||||
|
||||
func appendTemplatedValues(values map[string]string, params map[string]any, useGoTemplate bool, goTemplateOptions []string) error {
|
||||
@@ -26,9 +27,7 @@ func appendTemplatedValues(values map[string]string, params map[string]any, useG
|
||||
}
|
||||
}
|
||||
|
||||
for key, value := range tmp {
|
||||
params[key] = value
|
||||
}
|
||||
maps.Copy(params, tmp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -151,9 +151,9 @@ spec:
|
||||
func newFakeAppsets(fakeAppsetYAML string) []argoappv1.ApplicationSet {
|
||||
var results []argoappv1.ApplicationSet
|
||||
|
||||
appsetRawYamls := strings.Split(fakeAppsetYAML, "---")
|
||||
appsetRawYamls := strings.SplitSeq(fakeAppsetYAML, "---")
|
||||
|
||||
for _, appsetRawYaml := range appsetRawYamls {
|
||||
for appsetRawYaml := range appsetRawYamls {
|
||||
var appset argoappv1.ApplicationSet
|
||||
err := yaml.Unmarshal([]byte(appsetRawYaml), &appset)
|
||||
if err != nil {
|
||||
|
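strings.SplitSeq (Go 1.24+) returns an iterator rather than a slice, which is why the range clause above drops the index variable. A minimal stand-alone example:

package main

import (
    "fmt"
    "strings"
)

func main() {
    docs := "a: 1\n---\nb: 2"
    // SplitSeq yields each substring lazily instead of allocating a []string.
    for doc := range strings.SplitSeq(docs, "---") {
        fmt.Println(strings.TrimSpace(doc))
    }
}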
||||
@@ -1,6 +1,8 @@
|
||||
package github_app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
@@ -8,40 +10,65 @@ import (
|
||||
"github.com/google/go-github/v69/github"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services/github_app_auth"
|
||||
appsetutils "github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
"github.com/argoproj/argo-cd/v3/util/git"
|
||||
)
|
||||
|
||||
func getOptionalHTTPClientAndTransport(optionalHTTPClient ...*http.Client) (*http.Client, http.RoundTripper) {
|
||||
httpClient := appsetutils.GetOptionalHTTPClient(optionalHTTPClient...)
|
||||
if len(optionalHTTPClient) > 0 && optionalHTTPClient[0] != nil && optionalHTTPClient[0].Transport != nil {
|
||||
// will either use the provided custom httpClient and its transport
|
||||
return httpClient, optionalHTTPClient[0].Transport
|
||||
// getInstallationClient creates a new GitHub client with the specified installation ID.
|
||||
// It also returns a ghinstallation.Transport, which can be used for git requests.
|
||||
func getInstallationClient(g github_app_auth.Authentication, url string, httpClient ...*http.Client) (*github.Client, error) {
|
||||
if g.InstallationId <= 0 {
|
||||
return nil, errors.New("installation ID is required for github")
|
||||
}
|
||||
// or the default httpClient and transport
|
||||
return httpClient, http.DefaultTransport
|
||||
}
|
||||
|
||||
// Client builds a github client for the given app authentication.
|
||||
func Client(g github_app_auth.Authentication, url string, optionalHTTPClient ...*http.Client) (*github.Client, error) {
|
||||
httpClient, transport := getOptionalHTTPClientAndTransport(optionalHTTPClient...)
|
||||
// Use provided HTTP client's transport or default
|
||||
var transport http.RoundTripper
|
||||
if len(httpClient) > 0 && httpClient[0] != nil && httpClient[0].Transport != nil {
|
||||
transport = httpClient[0].Transport
|
||||
} else {
|
||||
transport = http.DefaultTransport
|
||||
}
|
||||
|
||||
rt, err := ghinstallation.New(transport, g.Id, g.InstallationId, []byte(g.PrivateKey))
|
||||
itr, err := ghinstallation.New(transport, g.Id, g.InstallationId, []byte(g.PrivateKey))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create github app install: %w", err)
|
||||
return nil, fmt.Errorf("failed to create GitHub installation transport: %w", err)
|
||||
}
|
||||
|
||||
if url == "" {
|
||||
url = g.EnterpriseBaseURL
|
||||
}
|
||||
|
||||
var client *github.Client
|
||||
httpClient.Transport = rt
|
||||
if url == "" {
|
||||
client = github.NewClient(httpClient)
|
||||
} else {
|
||||
rt.BaseURL = url
|
||||
client, err = github.NewClient(httpClient).WithEnterpriseURLs(url, url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create github enterprise client: %w", err)
|
||||
}
|
||||
client = github.NewClient(&http.Client{Transport: itr})
|
||||
return client, nil
|
||||
}
|
||||
|
||||
itr.BaseURL = url
|
||||
client, err = github.NewClient(&http.Client{Transport: itr}).WithEnterpriseURLs(url, url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create GitHub enterprise client: %w", err)
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Client builds a github client for the given app authentication.
|
||||
func Client(ctx context.Context, g github_app_auth.Authentication, url, org string, optionalHTTPClient ...*http.Client) (*github.Client, error) {
|
||||
if url == "" {
|
||||
url = g.EnterpriseBaseURL
|
||||
}
|
||||
|
||||
// If an installation ID is already provided, use it directly.
|
||||
if g.InstallationId != 0 {
|
||||
return getInstallationClient(g, url, optionalHTTPClient...)
|
||||
}
|
||||
|
||||
// Auto-discover installation ID using shared utility
|
||||
// Pass optional HTTP client for metrics tracking
|
||||
installationId, err := git.DiscoverGitHubAppInstallationID(ctx, g.Id, g.PrivateKey, url, org, optionalHTTPClient...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
g.InstallationId = installationId
|
||||
return getInstallationClient(g, url, optionalHTTPClient...)
|
||||
}
|
||||
|
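The core of the new getInstallationClient helper is the standard ghinstallation pattern: wrap a transport with the app's installation credentials and hand it to go-github. A condensed sketch of that pattern; appID, installationID and privateKey are hypothetical values, not taken from this diff:

// Sketch only; assumes it runs inside a function returning (*github.Client, error).
var (
    appID          int64 = 123456 // hypothetical GitHub App ID
    installationID int64 = 789012 // hypothetical installation ID
    privateKey           = "-----BEGIN RSA PRIVATE KEY-----\n..." // hypothetical key
)
itr, err := ghinstallation.New(http.DefaultTransport, appID, installationID, []byte(privateKey))
if err != nil {
    return nil, fmt.Errorf("failed to create GitHub installation transport: %w", err)
}
client := github.NewClient(&http.Client{Transport: itr})
// For GitHub Enterprise, point both the transport and the client at the instance URL:
//   itr.BaseURL = url
//   client, err = github.NewClient(&http.Client{Transport: itr}).WithEnterpriseURLs(url, url)
return client, nil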
||||
@@ -3,6 +3,7 @@ package pull_request
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/microsoft/azure-devops-go-api/azuredevops/v7"
|
||||
@@ -107,7 +108,7 @@ func (a *AzureDevOpsService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
|
||||
if *pr.Repository.Name == a.repo {
|
||||
pullRequests = append(pullRequests, &PullRequest{
|
||||
Number: *pr.PullRequestId,
|
||||
Number: int64(*pr.PullRequestId),
|
||||
Title: *pr.Title,
|
||||
Branch: strings.Replace(*pr.SourceRefName, "refs/heads/", "", 1),
|
||||
TargetBranch: strings.Replace(*pr.TargetRefName, "refs/heads/", "", 1),
|
||||
@@ -136,13 +137,7 @@ func convertLabels(tags *[]core.WebApiTagDefinition) []string {
|
||||
// containAzureDevOpsLabels returns true if gotLabels contains expectedLabels
|
||||
func containAzureDevOpsLabels(expectedLabels []string, gotLabels []string) bool {
|
||||
for _, expected := range expectedLabels {
|
||||
found := false
|
||||
for _, got := range gotLabels {
|
||||
if expected == got {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
found := slices.Contains(gotLabels, expected)
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -87,7 +87,7 @@ func TestListPullRequest(t *testing.T) {
|
||||
assert.Equal(t, "main", list[0].TargetBranch)
|
||||
assert.Equal(t, prHeadSha, list[0].HeadSHA)
|
||||
assert.Equal(t, "feat(123)", list[0].Title)
|
||||
assert.Equal(t, prID, list[0].Number)
|
||||
assert.Equal(t, int64(prID), list[0].Number)
|
||||
assert.Equal(t, uniqueName, list[0].Author)
|
||||
}
|
||||
|
||||
|
||||
@@ -81,7 +81,10 @@ func NewBitbucketCloudServiceBasicAuth(baseURL, username, password, owner, repos
|
||||
return nil, fmt.Errorf("error parsing base url of %s for %s/%s: %w", baseURL, owner, repositorySlug, err)
|
||||
}
|
||||
|
||||
bitbucketClient := bitbucket.NewBasicAuth(username, password)
|
||||
bitbucketClient, err := bitbucket.NewBasicAuth(username, password)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating BitBucket Cloud client with basic auth: %w", err)
|
||||
}
|
||||
bitbucketClient.SetApiBaseURL(*url)
|
||||
|
||||
return &BitbucketCloudService{
|
||||
@@ -97,14 +100,13 @@ func NewBitbucketCloudServiceBearerToken(baseURL, bearerToken, owner, repository
|
||||
return nil, fmt.Errorf("error parsing base url of %s for %s/%s: %w", baseURL, owner, repositorySlug, err)
|
||||
}
|
||||
|
||||
bitbucketClient := bitbucket.NewOAuthbearerToken(bearerToken)
|
||||
bitbucketClient, err := bitbucket.NewOAuthbearerToken(bearerToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating BitBucket Cloud client with oauth bearer token: %w", err)
|
||||
}
|
||||
bitbucketClient.SetApiBaseURL(*url)
|
||||
|
||||
return &BitbucketCloudService{
|
||||
client: bitbucketClient,
|
||||
owner: owner,
|
||||
repositorySlug: repositorySlug,
|
||||
}, nil
|
||||
return &BitbucketCloudService{client: bitbucketClient, owner: owner, repositorySlug: repositorySlug}, nil
|
||||
}
|
||||
|
||||
func NewBitbucketCloudServiceNoAuth(baseURL, owner, repositorySlug string) (PullRequestService, error) {
|
||||
@@ -154,7 +156,7 @@ func (b *BitbucketCloudService) List(_ context.Context) ([]*PullRequest, error)
|
||||
|
||||
for _, pull := range pulls {
|
||||
pullRequests = append(pullRequests, &PullRequest{
|
||||
Number: pull.ID,
|
||||
Number: int64(pull.ID),
|
||||
Title: pull.Title,
|
||||
Branch: pull.Source.Branch.Name,
|
||||
TargetBranch: pull.Destination.Branch.Name,
|
||||
|
||||
@@ -89,7 +89,7 @@ func TestListPullRequestBearerTokenCloud(t *testing.T) {
|
||||
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 1)
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, int64(101), pullRequests[0].Number)
|
||||
assert.Equal(t, "feat(foo-bar)", pullRequests[0].Title)
|
||||
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
|
||||
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)
|
||||
@@ -107,7 +107,7 @@ func TestListPullRequestNoAuthCloud(t *testing.T) {
|
||||
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 1)
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, int64(101), pullRequests[0].Number)
|
||||
assert.Equal(t, "feat(foo-bar)", pullRequests[0].Title)
|
||||
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
|
||||
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)
|
||||
@@ -125,7 +125,7 @@ func TestListPullRequestBasicAuthCloud(t *testing.T) {
|
||||
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 1)
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, int64(101), pullRequests[0].Number)
|
||||
assert.Equal(t, "feat(foo-bar)", pullRequests[0].Title)
|
||||
assert.Equal(t, "feature/foo-bar", pullRequests[0].Branch)
|
||||
assert.Equal(t, "1a8dd249c04a", pullRequests[0].HeadSHA)
|
||||
|
||||
@@ -82,7 +82,7 @@ func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) {
|
||||
|
||||
for _, pull := range pulls {
|
||||
pullRequests = append(pullRequests, &PullRequest{
|
||||
Number: pull.ID,
|
||||
Number: int64(pull.ID),
|
||||
Title: pull.Title,
|
||||
Branch: pull.FromRef.DisplayID, // ID: refs/heads/main DisplayID: main
|
||||
TargetBranch: pull.ToRef.DisplayID,
|
||||
|
||||
@@ -68,7 +68,7 @@ func TestListPullRequestNoAuth(t *testing.T) {
|
||||
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 1)
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, int64(101), pullRequests[0].Number)
|
||||
assert.Equal(t, "feat(ABC) : 123", pullRequests[0].Title)
|
||||
assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch)
|
||||
assert.Equal(t, "master", pullRequests[0].TargetBranch)
|
||||
@@ -211,7 +211,7 @@ func TestListPullRequestBasicAuth(t *testing.T) {
|
||||
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 1)
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, int64(101), pullRequests[0].Number)
|
||||
assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch)
|
||||
assert.Equal(t, "cb3cf2e4d1517c83e720d2585b9402dbef71f992", pullRequests[0].HeadSHA)
|
||||
}
|
||||
@@ -228,7 +228,7 @@ func TestListPullRequestBearerAuth(t *testing.T) {
|
||||
pullRequests, err := ListPullRequests(t.Context(), svc, []v1alpha1.PullRequestGeneratorFilter{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, pullRequests, 1)
|
||||
assert.Equal(t, 101, pullRequests[0].Number)
|
||||
assert.Equal(t, int64(101), pullRequests[0].Number)
|
||||
assert.Equal(t, "feat(ABC) : 123", pullRequests[0].Title)
|
||||
assert.Equal(t, "feature-ABC-123", pullRequests[0].Branch)
|
||||
assert.Equal(t, "cb3cf2e4d1517c83e720d2585b9402dbef71f992", pullRequests[0].HeadSHA)
|
||||
@@ -268,7 +268,6 @@ func TestListPullRequestTLS(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
defaultHandler(t)(w, r)
|
||||
|
||||
@@ -68,7 +68,7 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
continue
|
||||
}
|
||||
list = append(list, &PullRequest{
|
||||
Number: int(pr.Index),
|
||||
Number: int64(pr.Index),
|
||||
Title: pr.Title,
|
||||
Branch: pr.Head.Ref,
|
||||
TargetBranch: pr.Base.Ref,
|
||||
@@ -83,7 +83,7 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
// containLabels returns true if gotLabels contains expectedLabels
|
||||
func giteaContainLabels(expectedLabels []string, gotLabels []*gitea.Label) bool {
|
||||
gotLabelNamesMap := make(map[string]bool)
|
||||
for i := 0; i < len(gotLabels); i++ {
|
||||
for i := range gotLabels {
|
||||
gotLabelNamesMap[gotLabels[i].Name] = true
|
||||
}
|
||||
for _, expected := range expectedLabels {
|
||||
|
||||
@@ -303,7 +303,7 @@ func TestGiteaList(t *testing.T) {
|
||||
prs, err := host.List(t.Context())
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, prs, 1)
|
||||
assert.Equal(t, 1, prs[0].Number)
|
||||
assert.Equal(t, int64(1), prs[0].Number)
|
||||
assert.Equal(t, "add an empty file", prs[0].Title)
|
||||
assert.Equal(t, "test", prs[0].Branch)
|
||||
assert.Equal(t, "main", prs[0].TargetBranch)
|
||||
|
||||
@@ -76,7 +76,7 @@ func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
continue
|
||||
}
|
||||
pullRequests = append(pullRequests, &PullRequest{
|
||||
Number: *pull.Number,
|
||||
Number: int64(*pull.Number),
|
||||
Title: *pull.Title,
|
||||
Branch: *pull.Head.Ref,
|
||||
TargetBranch: *pull.Base.Ref,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package pull_request
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services/github_app_auth"
|
||||
@@ -8,9 +9,9 @@ import (
|
||||
appsetutils "github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
)
|
||||
|
||||
func NewGithubAppService(g github_app_auth.Authentication, url, owner, repo string, labels []string, optionalHTTPClient ...*http.Client) (PullRequestService, error) {
|
||||
func NewGithubAppService(ctx context.Context, g github_app_auth.Authentication, url, owner, repo string, labels []string, optionalHTTPClient ...*http.Client) (PullRequestService, error) {
|
||||
httpClient := appsetutils.GetOptionalHTTPClient(optionalHTTPClient...)
|
||||
client, err := github_app.Client(g, url, httpClient)
|
||||
client, err := github_app.Client(ctx, g, url, owner, httpClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -61,11 +61,15 @@ func (g *GitLabService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
var labelsList gitlab.LabelOptions = g.labels
|
||||
labels = &labelsList
|
||||
}
|
||||
opts := &gitlab.ListProjectMergeRequestsOptions{
|
||||
|
||||
snippetsListOptions := gitlab.ExploreSnippetsOptions{
|
||||
ListOptions: gitlab.ListOptions{
|
||||
PerPage: 100,
|
||||
},
|
||||
Labels: labels,
|
||||
}
|
||||
opts := &gitlab.ListProjectMergeRequestsOptions{
|
||||
ListOptions: snippetsListOptions.ListOptions,
|
||||
Labels: labels,
|
||||
}
|
||||
|
||||
if g.pullRequestState != "" {
|
||||
|
||||
@@ -78,7 +78,7 @@ func TestList(t *testing.T) {
|
||||
prs, err := svc.List(t.Context())
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, prs, 1)
|
||||
assert.Equal(t, 15442, prs[0].Number)
|
||||
assert.Equal(t, int64(15442), prs[0].Number)
|
||||
assert.Equal(t, "Draft: Use structured logging for DB load balancer", prs[0].Title)
|
||||
assert.Equal(t, "use-structured-logging-for-db-load-balancer", prs[0].Branch)
|
||||
assert.Equal(t, "master", prs[0].TargetBranch)
|
||||
@@ -158,7 +158,6 @@ func TestListWithStateTLS(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
|
||||
writeMRListResponse(t, w)
|
||||
|
||||
@@ -7,7 +7,8 @@ import (
|
||||
|
||||
type PullRequest struct {
|
||||
// Number is a number that will be the ID of the pull request.
|
||||
Number int
|
||||
// Gitlab uses int64 for the pull request number.
|
||||
Number int64
|
||||
// Title of the pull request.
|
||||
Title string
|
||||
// Branch is the name of the branch from which the pull request originated.
|
||||
|
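With Number widened to int64, each provider converts its native ID type once at the boundary, and formatting switches from strconv.Itoa to strconv.FormatInt, as the generator hunk earlier in this diff shows. A tiny illustration with a made-up ID:

// Sketch only.
azureID := 12345                       // e.g. *pr.PullRequestId from a provider SDK
number := int64(azureID)               // widen once, at the boundary
param := strconv.FormatInt(number, 10) // "12345"
_ = param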
||||
@@ -53,8 +53,12 @@ func (c *ExtendedClient) GetContents(repo *Repository, path string) (bool, error
|
||||
var _ SCMProviderService = &BitBucketCloudProvider{}
|
||||
|
||||
func NewBitBucketCloudProvider(owner string, user string, password string, allBranches bool) (*BitBucketCloudProvider, error) {
|
||||
bitbucketClient, err := bitbucket.NewBasicAuth(user, password)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating BitBucket Cloud client with basic auth: %w", err)
|
||||
}
|
||||
client := &ExtendedClient{
|
||||
bitbucket.NewBasicAuth(user, password),
|
||||
bitbucketClient,
|
||||
user,
|
||||
password,
|
||||
owner,
|
||||
|
||||
@@ -445,7 +445,6 @@ func TestListReposTLS(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
defaultHandler(t)(w, r)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package scm_provider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/applicationset/services/github_app_auth"
|
||||
@@ -8,9 +9,9 @@ import (
|
||||
appsetutils "github.com/argoproj/argo-cd/v3/applicationset/utils"
|
||||
)
|
||||
|
||||
func NewGithubAppProviderFor(g github_app_auth.Authentication, organization string, url string, allBranches bool, optionalHTTPClient ...*http.Client) (*GithubProvider, error) {
|
||||
func NewGithubAppProviderFor(ctx context.Context, g github_app_auth.Authentication, organization string, url string, allBranches bool, optionalHTTPClient ...*http.Client) (*GithubProvider, error) {
|
||||
httpClient := appsetutils.GetOptionalHTTPClient(optionalHTTPClient...)
|
||||
client, err := github_app.Client(g, url, httpClient)
|
||||
client, err := github_app.Client(ctx, g, url, organization, httpClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -76,8 +76,13 @@ func (g *GitlabProvider) GetBranches(ctx context.Context, repo *Repository) ([]*
|
||||
}
|
||||
|
||||
func (g *GitlabProvider) ListRepos(_ context.Context, cloneProtocol string) ([]*Repository, error) {
|
||||
snippetsListOptions := gitlab.ExploreSnippetsOptions{
|
||||
ListOptions: gitlab.ListOptions{
|
||||
PerPage: 100,
|
||||
},
|
||||
}
|
||||
opt := &gitlab.ListGroupProjectsOptions{
|
||||
ListOptions: gitlab.ListOptions{PerPage: 100},
|
||||
ListOptions: snippetsListOptions.ListOptions,
|
||||
IncludeSubGroups: &g.includeSubgroups,
|
||||
WithShared: &g.includeSharedProjects,
|
||||
Topic: &g.topic,
|
||||
@@ -173,8 +178,13 @@ func (g *GitlabProvider) listBranches(_ context.Context, repo *Repository) ([]gi
|
||||
return branches, nil
|
||||
}
|
||||
// Otherwise, scrape the ListBranches API.
|
||||
snippetsListOptions := gitlab.ExploreSnippetsOptions{
|
||||
ListOptions: gitlab.ListOptions{
|
||||
PerPage: 100,
|
||||
},
|
||||
}
|
||||
opt := &gitlab.ListBranchesOptions{
|
||||
ListOptions: gitlab.ListOptions{PerPage: 100},
|
||||
ListOptions: snippetsListOptions.ListOptions,
|
||||
}
|
||||
for {
|
||||
gitlabBranches, resp, err := g.client.Branches.ListBranches(repo.RepositoryId, opt)
|
||||
|
||||
@@ -1301,7 +1301,6 @@ func TestGetBranchesTLS(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gitlabMockHandler(t)(w, r)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
@@ -58,13 +59,7 @@ func matchFilter(ctx context.Context, provider SCMProviderService, repo *Reposit
|
||||
}
|
||||
|
||||
if filter.LabelMatch != nil {
|
||||
found := false
|
||||
for _, label := range repo.Labels {
|
||||
if filter.LabelMatch.MatchString(label) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
found := slices.ContainsFunc(repo.Labels, filter.LabelMatch.MatchString)
|
||||
if !found {
|
||||
return false, nil
|
||||
}
|
||||
|
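Several hunks in this changeset collapse manual search loops into slices.Contains and slices.ContainsFunc (Go 1.21+). A small stand-alone example of both:

package main

import (
    "fmt"
    "regexp"
    "slices"
)

func main() {
    labels := []string{"team-a", "preview"}
    // Exact membership test.
    fmt.Println(slices.Contains(labels, "preview")) // true
    // Predicate-based test, mirroring the LabelMatch regexp above.
    re := regexp.MustCompile(`^team-`)
    fmt.Println(slices.ContainsFunc(labels, re.MatchString)) // true
}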
||||
@@ -1,15 +1,12 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/argoproj/argo-cd/v3/common"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/db"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v3/util/settings"
|
||||
)
|
||||
|
||||
// ClusterSpecifier contains only the name and server URL of a cluster. We use this struct to avoid partially-populating
|
||||
@@ -19,42 +16,44 @@ type ClusterSpecifier struct {
|
||||
Server string
|
||||
}
|
||||
|
||||
func ListClusters(ctx context.Context, clientset kubernetes.Interface, namespace string) ([]ClusterSpecifier, error) {
|
||||
clusterSecretsList, err := clientset.CoreV1().Secrets(namespace).List(ctx,
|
||||
metav1.ListOptions{LabelSelector: common.LabelKeySecretType + "=" + common.LabelValueSecretTypeCluster})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clusterSecretsList == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
clusterSecrets := clusterSecretsList.Items
|
||||
|
||||
clusterList := make([]ClusterSpecifier, len(clusterSecrets))
|
||||
|
||||
hasInClusterCredentials := false
|
||||
for i, clusterSecret := range clusterSecrets {
|
||||
cluster, err := db.SecretToCluster(&clusterSecret)
|
||||
if err != nil || cluster == nil {
|
||||
return nil, fmt.Errorf("unable to convert cluster secret to cluster object '%s': %w", clusterSecret.Name, err)
|
||||
// SecretsContainInClusterCredentials checks if any of the provided secrets represent the in-cluster configuration.
|
||||
func SecretsContainInClusterCredentials(secrets []corev1.Secret) bool {
|
||||
for _, secret := range secrets {
|
||||
if string(secret.Data["server"]) == appv1.KubernetesInternalAPIServerAddr {
|
||||
return true
|
||||
}
|
||||
clusterList[i] = ClusterSpecifier{
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ListClusters returns a list of cluster specifiers using the ClusterInformer.
|
||||
func ListClusters(clusterInformer *settings.ClusterInformer) ([]ClusterSpecifier, error) {
|
||||
clusters, err := clusterInformer.ListClusters()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
}
|
||||
// len of clusters +1 for the in cluster secret
|
||||
clusterList := make([]ClusterSpecifier, 0, len(clusters)+1)
|
||||
hasInCluster := false
|
||||
|
||||
for _, cluster := range clusters {
|
||||
clusterList = append(clusterList, ClusterSpecifier{
|
||||
Name: cluster.Name,
|
||||
Server: cluster.Server,
|
||||
}
|
||||
})
|
||||
if cluster.Server == appv1.KubernetesInternalAPIServerAddr {
|
||||
hasInClusterCredentials = true
|
||||
hasInCluster = true
|
||||
}
|
||||
}
|
||||
if !hasInClusterCredentials {
|
||||
|
||||
if !hasInCluster {
|
||||
// There was no secret for the in-cluster config, so we add it here. We don't fully-populate the Cluster struct,
|
||||
// since only the name and server fields are used by the generator.
|
||||
clusterList = append(clusterList, ClusterSpecifier{
|
||||
Name: "in-cluster",
|
||||
Name: appv1.KubernetesInClusterName,
|
||||
Server: appv1.KubernetesInternalAPIServerAddr,
|
||||
})
|
||||
}
|
||||
|
||||
return clusterList, nil
|
||||
}
|
||||
|
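Calling the refactored helper is now just a lookup against the shared informer; the in-cluster entry is appended automatically when no matching secret exists. A hedged usage sketch (clusterInformer wired as shown earlier in this diff):

// Sketch only.
specs, err := ListClusters(clusterInformer)
if err != nil {
    return fmt.Errorf("error listing clusters: %w", err)
}
for _, c := range specs {
    // Each entry carries only Name and Server, e.g. the in-cluster name and the internal API server address.
    fmt.Printf("cluster %q at %s\n", c.Name, c.Server)
}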
||||
@@ -216,7 +216,6 @@ spec:
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
foundApp := v1alpha1.Application{TypeMeta: appMeta}
|
||||
|
||||
@@ -2,6 +2,7 @@ package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -207,12 +208,7 @@ type Requirement struct {
|
||||
}
|
||||
|
||||
func (r *Requirement) hasValue(value string) bool {
|
||||
for i := range r.strValues {
|
||||
if r.strValues[i] == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.Contains(r.strValues, value)
|
||||
}
|
||||
|
||||
func (r *Requirement) Matches(ls labels.Labels) bool {
|
||||
|
||||
@@ -30,6 +30,10 @@ import (
|
||||
|
||||
var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance
|
||||
|
||||
// baseTemplate is a pre-initialized template with all sprig functions loaded.
|
||||
// Cloning this is much faster than calling Funcs() on a new template each time.
|
||||
var baseTemplate *template.Template
|
||||
|
||||
func init() {
|
||||
// Avoid allowing the user to learn things about the environment.
|
||||
delete(sprigFuncMap, "env")
|
||||
@@ -40,6 +44,10 @@ func init() {
|
||||
sprigFuncMap["toYaml"] = toYAML
|
||||
sprigFuncMap["fromYaml"] = fromYAML
|
||||
sprigFuncMap["fromYamlArray"] = fromYAMLArray
|
||||
|
||||
// Initialize the base template with sprig functions once at startup.
|
||||
// This must be done after modifying sprigFuncMap above.
|
||||
baseTemplate = template.New("base").Funcs(sprigFuncMap)
|
||||
}
|
||||
|
||||
type Renderer interface {
|
||||
@@ -309,16 +317,21 @@ var isTemplatedRegex = regexp.MustCompile(".*{{.*}}.*")
|
||||
// remaining in the substituted template.
|
||||
func (r *Render) Replace(tmpl string, replaceMap map[string]any, useGoTemplate bool, goTemplateOptions []string) (string, error) {
|
||||
if useGoTemplate {
|
||||
template, err := template.New("").Funcs(sprigFuncMap).Parse(tmpl)
|
||||
// Clone the base template which has sprig funcs pre-loaded
|
||||
cloned, err := baseTemplate.Clone()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to clone base template: %w", err)
|
||||
}
|
||||
for _, option := range goTemplateOptions {
|
||||
cloned = cloned.Option(option)
|
||||
}
|
||||
parsed, err := cloned.Parse(tmpl)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template %s: %w", tmpl, err)
|
||||
}
|
||||
for _, option := range goTemplateOptions {
|
||||
template = template.Option(option)
|
||||
}
|
||||
|
||||
var replacedTmplBuffer bytes.Buffer
|
||||
if err = template.Execute(&replacedTmplBuffer, replaceMap); err != nil {
|
||||
if err = parsed.Execute(&replacedTmplBuffer, replaceMap); err != nil {
|
||||
return "", fmt.Errorf("failed to execute go template %s: %w", tmpl, err)
|
||||
}
|
||||
|
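The change above avoids re-registering the full sprig FuncMap on every call by cloning a pre-built template. The same pattern in isolation, with an illustrative one-function map standing in for sprig:

package main

import (
    "os"
    "strings"
    "text/template"
)

// base is built once; Clone gives each caller an independent copy cheaply.
var base = template.New("base").Funcs(template.FuncMap{"upper": strings.ToUpper})

func main() {
    t, err := base.Clone()
    if err != nil {
        panic(err)
    }
    t = t.Option("missingkey=error")
    t, err = t.Parse(`{{ upper .name }}`)
    if err != nil {
        panic(err)
    }
    _ = t.Execute(os.Stdout, map[string]any{"name": "argo"}) // prints ARGO
}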
||||
@@ -375,8 +388,7 @@ func invalidGenerators(applicationSetInfo *argoappsv1.ApplicationSet) (bool, map
|
||||
for index, generator := range applicationSetInfo.Spec.Generators {
|
||||
v := reflect.Indirect(reflect.ValueOf(generator))
|
||||
found := false
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
for _, field := range v.Fields() {
|
||||
if !field.CanInterface() {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -514,7 +514,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
params: map[string]any{
|
||||
"data": `a data string`,
|
||||
},
|
||||
errorMessage: `failed to parse template {{functiondoesnotexist}}: template: :1: function "functiondoesnotexist" not defined`,
|
||||
errorMessage: `failed to parse template {{functiondoesnotexist}}: template: base:1: function "functiondoesnotexist" not defined`,
|
||||
},
|
||||
{
|
||||
name: "Test template error",
|
||||
@@ -523,7 +523,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
params: map[string]any{
|
||||
"data": `a data string`,
|
||||
},
|
||||
errorMessage: `failed to execute go template {{.data.test}}: template: :1:7: executing "" at <.data.test>: can't evaluate field test in type interface {}`,
|
||||
errorMessage: `failed to execute go template {{.data.test}}: template: base:1:7: executing "base" at <.data.test>: can't evaluate field test in type interface {}`,
|
||||
},
|
||||
{
|
||||
name: "lookup missing value with missingkey=default",
|
||||
@@ -543,7 +543,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
"unused": "this is not used",
|
||||
},
|
||||
templateOptions: []string{"missingkey=error"},
|
||||
errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: :1:6: executing "" at <.doesnotexist>: map has no entry for key "doesnotexist"`,
|
||||
errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: base:1:6: executing "base" at <.doesnotexist>: map has no entry for key "doesnotexist"`,
|
||||
},
|
||||
{
|
||||
name: "toYaml",
|
||||
@@ -563,7 +563,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
name: "toYaml Error",
|
||||
fieldVal: `{{ toYaml . | indent 2 }}`,
|
||||
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
|
||||
errorMessage: "failed to execute go template {{ toYaml . | indent 2 }}: template: :1:3: executing \"\" at <toYaml .>: error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)",
|
||||
errorMessage: "failed to execute go template {{ toYaml . | indent 2 }}: template: base:1:3: executing \"base\" at <toYaml .>: error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)",
|
||||
params: map[string]any{
|
||||
"foo": func(_ *string) {
|
||||
},
|
||||
@@ -581,7 +581,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
name: "fromYaml error",
|
||||
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
|
||||
expectedVal: "world",
|
||||
errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: :1:8: executing \"\" at <fromYaml .value>: error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}",
|
||||
errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: base:1:8: executing \"base\" at <fromYaml .value>: error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}",
|
||||
params: map[string]any{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
@@ -598,7 +598,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
name: "fromYamlArray error",
|
||||
fieldVal: `{{ fromYamlArray .value | last }}`,
|
||||
expectedVal: "bonjour tout le monde",
|
||||
errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: :1:3: executing \"\" at <fromYamlArray .value>: error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}",
|
||||
errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: base:1:3: executing \"base\" at <fromYamlArray .value>: error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}",
|
||||
params: map[string]any{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
|
||||
@@ -107,10 +107,8 @@ func NewWebhookHandler(webhookParallelism int, argocdSettingsMgr *argosettings.S
|
||||
|
||||
func (h *WebhookHandler) startWorkerPool(webhookParallelism int) {
|
||||
compLog := log.WithField("component", "applicationset-webhook")
|
||||
for i := 0; i < webhookParallelism; i++ {
|
||||
h.Add(1)
|
||||
go func() {
|
||||
defer h.Done()
|
||||
for range webhookParallelism {
|
||||
h.Go(func() {
|
||||
for {
|
||||
payload, ok := <-h.queue
|
||||
if !ok {
|
||||
@@ -118,7 +116,7 @@ func (h *WebhookHandler) startWorkerPool(webhookParallelism int) {
|
||||
}
|
||||
guard.RecoverAndLog(func() { h.HandleEvent(payload) }, compLog, panicMsgAppSet)
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
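Two newer conveniences appear above: integer for range (Go 1.22+) and, assuming the handler embeds a sync.WaitGroup, the WaitGroup.Go helper (Go 1.25+) that bundles Add, go and Done. A minimal stand-alone worker-pool sketch under those assumptions:

package main

import (
    "fmt"
    "sync"
)

func main() {
    queue := make(chan int)
    var wg sync.WaitGroup
    for range 3 { // three workers
        wg.Go(func() { // Add(1) + goroutine + Done() in one call
            for item := range queue {
                fmt.Println("handled", item)
            }
        })
    }
    for i := range 5 {
        queue <- i
    }
    close(queue)
    wg.Wait()
}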
||||
|
||||
@@ -609,7 +609,7 @@ func fakeAppWithMatrixAndNestedGitGenerator(name, namespace, repo string) *v1alp
|
||||
},
|
||||
{
|
||||
Matrix: &apiextensionsv1.JSON{
|
||||
Raw: []byte(fmt.Sprintf(`{
|
||||
Raw: fmt.Appendf(nil, `{
|
||||
"Generators": [
|
||||
{
|
||||
"List": {
|
||||
@@ -626,7 +626,7 @@ func fakeAppWithMatrixAndNestedGitGenerator(name, namespace, repo string) *v1alp
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, repo)),
|
||||
}`, repo),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -707,7 +707,7 @@ func fakeAppWithMergeAndNestedGitGenerator(name, namespace, repo string) *v1alph
|
||||
},
|
||||
{
|
||||
Merge: &apiextensionsv1.JSON{
|
||||
Raw: []byte(fmt.Sprintf(`{
|
||||
Raw: fmt.Appendf(nil, `{
|
||||
"MergeKeys": ["server"],
|
||||
"Generators": [
|
||||
{
|
||||
@@ -719,7 +719,7 @@ func fakeAppWithMergeAndNestedGitGenerator(name, namespace, repo string) *v1alph
|
||||
}
|
||||
}
|
||||
]
|
||||
}`, repo)),
|
||||
}`, repo),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
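fmt.Appendf formats directly into a byte slice, so the []byte(fmt.Sprintf(...)) round trip above becomes a single call; passing nil as the destination allocates a fresh slice. For example, with an illustrative URL:

// Sketch only.
repo := "https://github.com/org/repo.git"
raw := fmt.Appendf(nil, `{"RepoURL": %q}`, repo) // []byte, no intermediate string
_ = raw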
||||
79
assets/swagger.json
generated
@@ -2265,6 +2265,44 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/{name}/events": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationSetService"
|
||||
],
|
||||
"summary": "ListResourceEvents returns a list of event resources",
|
||||
"operationId": "ApplicationSetService_ListResourceEvents",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "the applicationsets's name",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "The application set namespace. Default empty is argocd control plane namespace.",
|
||||
"name": "appsetNamespace",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1EventList"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applicationsets/{name}/resource-tree": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -6829,14 +6867,14 @@
|
||||
"type": "array",
|
||||
"title": "ClusterResourceBlacklist contains list of blacklisted cluster level resources",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1GroupKind"
|
||||
"$ref": "#/definitions/v1alpha1ClusterResourceRestrictionItem"
|
||||
}
|
||||
},
|
||||
"clusterResourceWhitelist": {
|
||||
"type": "array",
|
||||
"title": "ClusterResourceWhitelist contains list of whitelisted cluster level resources",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1GroupKind"
|
||||
"$ref": "#/definitions/v1alpha1ClusterResourceRestrictionItem"
|
||||
}
|
||||
},
|
||||
"description": {
|
||||
@@ -7050,7 +7088,7 @@
|
||||
},
|
||||
"v1alpha1ApplicationSet": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSet is a set of Application resources\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status",
|
||||
"title": "ApplicationSet is a set of Application resources.\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status",
|
||||
"properties": {
|
||||
"metadata": {
|
||||
"$ref": "#/definitions/v1ObjectMeta"
|
||||
@@ -7261,7 +7299,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"applyNestedSelectors": {
|
||||
"description": "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators\nDeprecated: This field is ignored, and the behavior is always enabled. The field will be removed in a future\nversion of the ApplicationSet CRD.",
|
||||
"description": "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators.\n\nDeprecated: This field is ignored, and the behavior is always enabled. The field will be removed in a future\nversion of the ApplicationSet CRD.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"generators": {
|
||||
@@ -7319,6 +7357,9 @@
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetCondition"
|
||||
}
|
||||
},
|
||||
"health": {
|
||||
"$ref": "#/definitions/v1alpha1HealthStatus"
|
||||
},
|
||||
"resources": {
|
||||
"description": "Resources is a list of Applications resources managed by this application set.",
|
||||
"type": "array",
|
||||
@@ -8164,6 +8205,22 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ClusterResourceRestrictionItem": {
|
||||
"type": "object",
|
||||
"title": "ClusterResourceRestrictionItem is a cluster resource that is restricted by the project's whitelist or blacklist",
|
||||
"properties": {
|
||||
"group": {
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name is the name of the restricted resource. Glob patterns using Go's filepath.Match syntax are supported.\nUnlike the group and kind fields, if no name is specified, all resources of the specified group/kind are matched.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1Command": {
|
||||
"type": "object",
|
||||
"title": "Command holds binary path and arguments list",
|
||||
@@ -8289,10 +8346,22 @@
|
||||
"description": "DrySource specifies a location for dry \"don't repeat yourself\" manifest source information.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"directory": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourceDirectory"
|
||||
},
|
||||
"helm": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourceHelm"
|
||||
},
|
||||
"kustomize": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourceKustomize"
|
||||
},
|
||||
"path": {
|
||||
"type": "string",
|
||||
"title": "Path is a directory path within the Git repository where the manifests are located"
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourcePlugin"
|
||||
},
|
||||
"repoURL": {
|
||||
"type": "string",
|
||||
"title": "RepoURL is the URL to the git repository that contains the application manifests"
|
||||
@@ -9437,7 +9506,7 @@
|
||||
"title": "TLSClientCertKey specifies the TLS client cert key for authenticating at the repo server"
|
||||
},
|
||||
"type": {
|
||||
"description": "Type specifies the type of the repoCreds. Can be either \"git\" or \"helm. \"git\" is assumed if empty or absent.",
|
||||
"description": "Type specifies the type of the repoCreds. Can be either \"git\", \"helm\" or \"oci\". \"git\" is assumed if empty or absent.",
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
|
||||
@@ -41,8 +41,6 @@ import (
)

const (
// CLIName is the name of the CLI
cliName = common.ApplicationController
// Default time in seconds for application resync period
defaultAppResyncPeriod = 120
// Default time in seconds for application resync period jitter
@@ -99,7 +97,7 @@ func NewCommand() *cobra.Command {
hydratorEnabled bool
)
command := cobra.Command{
Use: cliName,
Use: common.CommandApplicationController,
Short: "Run ArgoCD Application Controller",
Long: "ArgoCD application controller is a Kubernetes controller that continuously monitors running applications and compares the current, live state against the desired target state (as specified in the repo). This command runs Application Controller in the foreground. It can be configured by following options.",
DisableAutoGenTag: true,
@@ -202,7 +200,6 @@ func NewCommand() *cobra.Command {
time.Duration(appResyncJitter)*time.Second,
time.Duration(selfHealTimeoutSeconds)*time.Second,
selfHealBackoff,
time.Duration(selfHealBackoffCooldownSeconds)*time.Second,
time.Duration(syncTimeout)*time.Second,
time.Duration(repoErrorGracePeriod)*time.Second,
metricsPort,
@@ -275,6 +272,7 @@ func NewCommand() *cobra.Command {
command.Flags().IntVar(&selfHealBackoffFactor, "self-heal-backoff-factor", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_FACTOR", 3, 0, math.MaxInt32), "Specifies factor of exponential timeout between application self heal attempts")
command.Flags().IntVar(&selfHealBackoffCapSeconds, "self-heal-backoff-cap-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_CAP_SECONDS", 300, 0, math.MaxInt32), "Specifies max timeout of exponential backoff between application self heal attempts")
command.Flags().IntVar(&selfHealBackoffCooldownSeconds, "self-heal-backoff-cooldown-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_BACKOFF_COOLDOWN_SECONDS", 330, 0, math.MaxInt32), "Specifies period of time the app needs to stay synced before the self heal backoff can reset")
errors.CheckError(command.Flags().MarkDeprecated("self-heal-backoff-cooldown-seconds", "This flag is deprecated and has no effect."))
command.Flags().IntVar(&syncTimeout, "sync-timeout", env.ParseNumFromEnv("ARGOCD_APPLICATION_CONTROLLER_SYNC_TIMEOUT", 0, 0, math.MaxInt32), "Specifies the timeout after which a sync would be terminated. 0 means no timeout (default 0).")
command.Flags().Int64Var(&kubectlParallelismLimit, "kubectl-parallelism-limit", env.ParseInt64FromEnv("ARGOCD_APPLICATION_CONTROLLER_KUBECTL_PARALLELISM_LIMIT", 20, 0, math.MaxInt64), "Number of allowed concurrent kubectl fork/execs. Any value less than 1 means no limit.")
command.Flags().BoolVar(&repoServerPlaintext, "repo-server-plaintext", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT", false), "Disable TLS on connections to repo server")

@@ -32,6 +32,7 @@ import (
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -48,10 +49,6 @@ import (

var gitSubmoduleEnabled = env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true)

const (
cliName = common.ApplicationSetController
)

func NewCommand() *cobra.Command {
var (
clientConfig clientcmd.ClientConfig
@@ -86,7 +83,7 @@ func NewCommand() *cobra.Command {
_ = clientgoscheme.AddToScheme(scheme)
_ = appv1alpha1.AddToScheme(scheme)
command := cobra.Command{
Use: cliName,
Use: common.CommandApplicationSetController,
Short: "Starts Argo CD ApplicationSet controller",
DisableAutoGenTag: true,
RunE: func(c *cobra.Command, _ []string) error {
@@ -105,7 +102,12 @@ func NewCommand() *cobra.Command {
)

cli.SetLogFormat(cmdutil.LogFormat)
cli.SetLogLevel(cmdutil.LogLevel)

if debugLog {
cli.SetLogLevel("debug")
} else {
cli.SetLogLevel(cmdutil.LogLevel)
}

ctrl.SetLogger(logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()))

@@ -188,6 +190,18 @@ func NewCommand() *cobra.Command {
argoSettingsMgr := argosettings.NewSettingsManager(ctx, k8sClient, namespace)
argoCDDB := db.NewDB(namespace, argoSettingsMgr, k8sClient)

clusterInformer, err := argosettings.NewClusterInformer(k8sClient, namespace)
if err != nil {
log.Error(err, "unable to create cluster informer")
os.Exit(1)
}
go clusterInformer.Run(ctx.Done())

if !cache.WaitForCacheSync(ctx.Done(), clusterInformer.HasSynced) {
log.Error("Timed out waiting for cluster cache to sync")
os.Exit(1)
}

scmConfig := generators.NewSCMConfig(scmRootCAPath, allowedScmProviders, enableScmProviders, enableGitHubAPIMetrics, github_app.NewAuthCredentials(argoCDDB.(db.RepoCredsDB)), tokenRefStrictMode)

tlsConfig := apiclient.TLSConfiguration{
@@ -207,7 +221,7 @@ func NewCommand() *cobra.Command {
repoClientset := apiclient.NewRepoServerClientset(argocdRepoServer, repoServerTimeoutSeconds, tlsConfig)
argoCDService := services.NewArgoCDService(argoCDDB, gitSubmoduleEnabled, repoClientset, enableNewGitFileGlobbing)

topLevelGenerators := generators.GetGenerators(ctx, mgr.GetClient(), k8sClient, namespace, argoCDService, dynamicClient, scmConfig)
topLevelGenerators := generators.GetGenerators(ctx, mgr.GetClient(), k8sClient, namespace, argoCDService, dynamicClient, scmConfig, clusterInformer)

// start a webhook server that listens to incoming webhook payloads
webhookHandler, err := webhook.NewWebhookHandler(webhookParallelism, argoSettingsMgr, mgr.GetClient(), topLevelGenerators)
@@ -243,6 +257,7 @@ func NewCommand() *cobra.Command {
GlobalPreservedLabels: globalPreservedLabels,
Metrics: &metrics,
MaxResourcesStatusCount: maxResourcesStatusCount,
ClusterInformer: clusterInformer,
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
os.Exit(1)
@@ -287,7 +302,7 @@ func NewCommand() *cobra.Command {
command.Flags().IntVar(&webhookParallelism, "webhook-parallelism-limit", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_WEBHOOK_PARALLELISM_LIMIT", 50, 1, 1000), "Number of webhook requests processed concurrently")
command.Flags().StringSliceVar(&metricsAplicationsetLabels, "metrics-applicationset-labels", []string{}, "List of Application labels that will be added to the argocd_applicationset_labels metric")
command.Flags().BoolVar(&enableGitHubAPIMetrics, "enable-github-api-metrics", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_GITHUB_API_METRICS", false), "Enable GitHub API metrics for generators that use the GitHub API")
command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 0, 0, math.MaxInt), "Max number of resources stored in appset status.")
command.Flags().IntVar(&maxResourcesStatusCount, "max-resources-status-count", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_MAX_RESOURCES_STATUS_COUNT", 5000, 0, math.MaxInt), "Max number of resources stored in appset status.")

return &command
}

@@ -18,11 +18,6 @@ import (
traceutil "github.com/argoproj/argo-cd/v3/util/trace"
)

const (
// CLIName is the name of the CLI
cliName = "argocd-cmp-server"
)

func NewCommand() *cobra.Command {
var (
configFilePath string
@@ -32,7 +27,7 @@ func NewCommand() *cobra.Command {
otlpAttrs []string
)
command := cobra.Command{
Use: cliName,
Use: common.CommandCMPServer,
Short: "Run ArgoCD ConfigManagementPlugin Server",
Long: "ArgoCD ConfigManagementPlugin Server is an internal service which runs as sidecar container in reposerver deployment. The following configuration options are available:",
DisableAutoGenTag: true,

@@ -35,7 +35,7 @@ func NewCommand() *cobra.Command {
metricsHost string
)
command := &cobra.Command{
Use: "argocd-commit-server",
Use: common.CommandCommitServer,
Short: "Run Argo CD Commit Server",
Long: "Argo CD Commit Server is an internal service which commits and pushes hydrated manifests to git. This command runs Commit Server in the foreground.",
RunE: func(cmd *cobra.Command, _ []string) error {
@@ -91,13 +91,11 @@ func NewCommand() *cobra.Command {
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
wg.Go(func() {
s := <-sigCh
log.Printf("got signal %v, attempting graceful shutdown", s)
grpc.GracefulStop()
wg.Done()
}()
})

log.Println("starting grpc server")
err = grpc.Serve(listener)

@@ -25,13 +25,9 @@ import (
"github.com/argoproj/argo-cd/v3/util/tls"
)

const (
cliName = "argocd-dex"
)

func NewCommand() *cobra.Command {
command := &cobra.Command{
Use: cliName,
Use: common.CommandDex,
Short: "argocd-dex tools used by Argo CD",
Long: "argocd-dex has internal utility tools used by Argo CD",
DisableAutoGenTag: true,

@@ -9,20 +9,16 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"

"github.com/argoproj/argo-cd/v3/common"
"github.com/argoproj/argo-cd/v3/util/askpass"
"github.com/argoproj/argo-cd/v3/util/errors"
grpc_util "github.com/argoproj/argo-cd/v3/util/grpc"
utilio "github.com/argoproj/argo-cd/v3/util/io"
)

const (
// cliName is the name of the CLI
cliName = "argocd-git-ask-pass"
)

func NewCommand() *cobra.Command {
command := cobra.Command{
Use: cliName,
Use: common.CommandGitAskPass,
Short: "Argo CD git credential helper",
DisableAutoGenTag: true,
Run: func(c *cobra.Command, _ []string) {

@@ -2,15 +2,13 @@ package commands

import (
"github.com/spf13/cobra"
)

const (
cliName = "argocd-k8s-auth"
"github.com/argoproj/argo-cd/v3/common"
)

func NewCommand() *cobra.Command {
command := &cobra.Command{
Use: cliName,
Use: common.CommandK8sAuth,
Short: "argocd-k8s-auth a set of commands to generate k8s auth token",
DisableAutoGenTag: true,
Run: func(c *cobra.Command, args []string) {

@@ -6,12 +6,14 @@ import (
"encoding/json"
"fmt"
"os"
"strconv"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/service/sts"
smithyhttp "github.com/aws/smithy-go/transport/http"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientauthv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
@@ -58,13 +60,13 @@ func newAWSCommand() *cobra.Command {
return command
}

type getSignedRequestFunc func(clusterName, roleARN string, profile string) (string, error)
type getSignedRequestFunc func(ctx context.Context, clusterName, roleARN string, profile string) (string, error)

func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Duration, clusterName, roleARN string, profile string, fn getSignedRequestFunc) (string, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
for {
signed, err := fn(clusterName, roleARN, profile)
signed, err := fn(ctx, clusterName, roleARN, profile)
if err == nil {
return signed, nil
}
@@ -76,25 +78,53 @@ func getSignedRequestWithRetry(ctx context.Context, timeout, interval time.Durat
}
}

func getSignedRequest(clusterName, roleARN string, profile string) (string, error) {
sess, err := session.NewSessionWithOptions(session.Options{
Profile: profile,
})
func getSignedRequest(ctx context.Context, clusterName, roleARN string, profile string) (string, error) {
cfg, err := loadAWSConfig(ctx, profile)
if err != nil {
return "", fmt.Errorf("error creating new AWS session: %w", err)
return "", err
}
stsAPI := sts.New(sess)
return getSignedRequestWithConfig(ctx, clusterName, roleARN, cfg)
}

func loadAWSConfig(ctx context.Context, profile string) (aws.Config, error) {
var opts []func(*config.LoadOptions) error
if profile != "" {
opts = append(opts, config.WithSharedConfigProfile(profile))
}
cfg, err := config.LoadDefaultConfig(ctx, opts...)
if err != nil {
return aws.Config{}, fmt.Errorf("error loading AWS configuration: %w", err)
}
return cfg, nil
}

// getSignedRequestWithConfig presigns GetCallerIdentity using the given config. Used by getSignedRequest and by tests
// that inject a config with static credentials to exercise the roleARN path without real AWS credentials.
func getSignedRequestWithConfig(ctx context.Context, clusterName, roleARN string, cfg aws.Config) (string, error) {
// Use PresignOptions.ClientOptions + SetHeaderValue (same as aws-iam-authenticator) so the
// canonical request matches what EKS sends when validating. Build middleware can produce
// a different canonical form and thus an invalid signature for EKS.
// See kubernetes-sigs/aws-iam-authenticator pkg/token/token.go GetWithSTS().
client := sts.NewFromConfig(cfg)
if roleARN != "" {
creds := stscreds.NewCredentials(sess, roleARN)
stsAPI = sts.New(sess, &aws.Config{Credentials: creds})
appCreds := stscreds.NewAssumeRoleProvider(client, roleARN)
cfg.Credentials = aws.NewCredentialsCache(appCreds)
client = sts.NewFromConfig(cfg)
}
request, _ := stsAPI.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
request.HTTPRequest.Header.Add(clusterIDHeader, clusterName)
signed, err := request.Presign(requestPresignParam)

presignClient := sts.NewPresignClient(client)
presigned, err := presignClient.PresignGetCallerIdentity(ctx, &sts.GetCallerIdentityInput{},
func(presignOptions *sts.PresignOptions) {
presignOptions.ClientOptions = append(presignOptions.ClientOptions, func(stsOptions *sts.Options) {
stsOptions.APIOptions = append(stsOptions.APIOptions,
smithyhttp.SetHeaderValue(clusterIDHeader, clusterName),
smithyhttp.SetHeaderValue("X-Amz-Expires", strconv.Itoa(requestPresignParam)))
})
})
if err != nil {
return "", fmt.Errorf("error presigning AWS request: %w", err)
}
return signed, nil
return presigned.URL, nil
}

func formatJSON(token string, expiration time.Time) string {

@@ -1,14 +1,60 @@
package commands

import (
"context"
"errors"
"testing"
"time"

"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestGetSignedRequest(t *testing.T) {
t.Parallel()

t.Run("returns error when context is cancelled", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
cancel()

url, err := getSignedRequest(ctx, "my-cluster", "", "")

require.ErrorIs(t, err, context.Canceled)
assert.Empty(t, url)
})

t.Run("returns error for non-existent profile", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
profile := "argocd-k8s-auth-test-nonexistent-profile-12345"

url, err := getSignedRequest(ctx, "my-cluster", "", profile)

require.Error(t, err)
assert.Empty(t, url)
assert.Contains(t, err.Error(), "configuration", "error should mention configuration load failed")
})

t.Run("returns error when roleARN is provided and assume role fails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
cfg, err := config.LoadDefaultConfig(ctx,
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("test", "test", "")),
config.WithRegion("us-east-1"),
)
require.NoError(t, err)

url, err := getSignedRequestWithConfig(ctx, "my-cluster", "arn:aws:iam::123456789012:role/NonExistentRole", cfg)

require.Error(t, err)
assert.Empty(t, url)
assert.Contains(t, err.Error(), "presigning", "error should mention presigning failed when assume role is used")
})
}

func TestGetSignedRequestWithRetry(t *testing.T) {
t.Parallel()

@@ -72,7 +118,7 @@ type signedRequestMock struct {
returnFunc func(m *signedRequestMock) (string, error)
}

func (m *signedRequestMock) getSignedRequestMock(_, _ string, _ string) (string, error) {
func (m *signedRequestMock) getSignedRequestMock(_ context.Context, _, _ string, _ string) (string, error) {
m.getSignedRequestCalls++
return m.returnFunc(m)
}

@@ -52,7 +52,7 @@ func NewCommand() *cobra.Command {
selfServiceNotificationEnabled bool
)
command := cobra.Command{
Use: "controller",
Use: common.CommandNotifications,
Short: "Starts Argo CD Notifications controller",
RunE: func(_ *cobra.Command, _ []string) error {
ctx, cancel := context.WithCancel(context.Background())
@@ -150,13 +150,11 @@ func NewCommand() *cobra.Command {
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
wg.Go(func() {
s := <-sigCh
log.Printf("got signal %v, attempting graceful shutdown", s)
cancel()
}()
})

go ctrl.Run(ctx, processorsCount)
<-ctx.Done()
@@ -34,21 +34,18 @@ import (
"github.com/argoproj/argo-cd/v3/util/gpg"
"github.com/argoproj/argo-cd/v3/util/healthz"
utilio "github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/argo-cd/v3/util/profile"
"github.com/argoproj/argo-cd/v3/util/tls"
traceutil "github.com/argoproj/argo-cd/v3/util/trace"
)

const (
// CLIName is the name of the CLI
cliName = "argocd-repo-server"
)

var (
gnuPGSourcePath = env.StringFromEnv(common.EnvGPGDataPath, "/app/config/gpg/source")
pauseGenerationAfterFailedGenerationAttempts = env.ParseNumFromEnv(common.EnvPauseGenerationAfterFailedAttempts, 3, 0, math.MaxInt32)
pauseGenerationOnFailureForMinutes = env.ParseNumFromEnv(common.EnvPauseGenerationMinutes, 60, 0, math.MaxInt32)
pauseGenerationOnFailureForRequests = env.ParseNumFromEnv(common.EnvPauseGenerationRequests, 0, 0, math.MaxInt32)
gitSubmoduleEnabled = env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true)
helmUserAgent = env.StringFromEnv(common.EnvHelmUserAgent, "")
)

func NewCommand() *cobra.Command {
@@ -80,9 +77,10 @@ func NewCommand() *cobra.Command {
includeHiddenDirectories bool
cmpUseManifestGeneratePaths bool
ociMediaTypes []string
enableBuiltinGitConfig bool
)
command := cobra.Command{
Use: cliName,
Use: common.CommandRepoServer,
Short: "Run ArgoCD Repository Server",
Long: "ArgoCD Repository Server is an internal service which maintains a local cache of the Git repository holding the application manifests, and is responsible for generating and returning the Kubernetes manifests. This command runs Repository Server in the foreground. It can be configured by following options.",
DisableAutoGenTag: true,
@@ -155,6 +153,8 @@ func NewCommand() *cobra.Command {
IncludeHiddenDirectories: includeHiddenDirectories,
CMPUseManifestGeneratePaths: cmpUseManifestGeneratePaths,
OCIMediaTypes: ociMediaTypes,
EnableBuiltinGitConfig: enableBuiltinGitConfig,
HelmUserAgent: helmUserAgent,
}, askPassServer)
errors.CheckError(err)

@@ -173,7 +173,8 @@ func NewCommand() *cobra.Command {
listener, err := lc.Listen(ctx, "tcp", fmt.Sprintf("%s:%d", listenHost, listenPort))
errors.CheckError(err)

healthz.ServeHealthCheck(http.DefaultServeMux, func(r *http.Request) error {
mux := http.NewServeMux()
healthz.ServeHealthCheck(mux, func(r *http.Request) error {
if val, ok := r.URL.Query()["full"]; ok && len(val) > 0 && val[0] == "true" {
// connect to itself to make sure repo server is able to serve connection
// used by liveness probe to auto restart repo server
@@ -195,8 +196,9 @@ func NewCommand() *cobra.Command {
}
return nil
})
http.Handle("/metrics", metricsServer.GetHandler())
go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), nil)) }()
mux.Handle("/metrics", metricsServer.GetHandler())
profile.RegisterProfiler(mux)
go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), mux)) }()
go func() { errors.CheckError(askPassServer.Run()) }()

if gpg.IsGPGEnabled() {
@@ -221,13 +223,11 @@ func NewCommand() *cobra.Command {
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
wg.Go(func() {
s := <-sigCh
log.Printf("got signal %v, attempting graceful shutdown", s)
grpc.GracefulStop()
wg.Done()
}()
})

log.Println("starting grpc server")
err = grpc.Serve(listener)
@@ -265,6 +265,7 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&includeHiddenDirectories, "include-hidden-directories", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_INCLUDE_HIDDEN_DIRECTORIES", false), "Include hidden directories from Git")
command.Flags().BoolVar(&cmpUseManifestGeneratePaths, "plugin-use-manifest-generate-paths", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_PLUGIN_USE_MANIFEST_GENERATE_PATHS", false), "Pass the resources described in argocd.argoproj.io/manifest-generate-paths value to the cmpserver to generate the application manifests.")
command.Flags().StringSliceVar(&ociMediaTypes, "oci-layer-media-types", env.StringsFromEnv("ARGOCD_REPO_SERVER_OCI_LAYER_MEDIA_TYPES", []string{"application/vnd.oci.image.layer.v1.tar", "application/vnd.oci.image.layer.v1.tar+gzip", "application/vnd.cncf.helm.chart.content.v1.tar+gzip"}, ","), "Comma separated list of allowed media types for OCI media types. This only accounts for media types within layers.")
command.Flags().BoolVar(&enableBuiltinGitConfig, "enable-builtin-git-config", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_ENABLE_BUILTIN_GIT_CONFIG", true), "Enable builtin git configuration options that are required for correct argocd-repo-server operation.")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, cacheutil.Options{
OnClientCreated: func(client *redis.Client) {

@@ -101,7 +101,7 @@ func NewCommand() *cobra.Command {
enableK8sEvent []string
)
command := &cobra.Command{
Use: cliName,
Use: common.CommandServer,
Short: "Run the ArgoCD API server",
Long: "The API server is a gRPC/REST server which exposes the API consumed by the Web UI, CLI, and CI/CD systems. This command runs API server in the foreground. It can be configured by following options.",
DisableAutoGenTag: true,
@@ -307,7 +307,7 @@ func NewCommand() *cobra.Command {
command.Flags().BoolVar(&disableAuth, "disable-auth", env.ParseBoolFromEnv("ARGOCD_SERVER_DISABLE_AUTH", false), "Disable client authentication")
command.Flags().StringVar(&contentTypes, "api-content-types", env.StringFromEnv("ARGOCD_API_CONTENT_TYPES", "application/json", env.StringFromEnvOpts{AllowEmpty: true}), "Semicolon separated list of allowed content types for non GET api requests. Any content type is allowed if empty.")
command.Flags().BoolVar(&enableGZip, "enable-gzip", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_GZIP", true), "Enable GZIP compression")
command.AddCommand(cli.NewVersionCmd(cliName))
command.AddCommand(cli.NewVersionCmd(common.CommandServer))
command.Flags().StringVar(&listenHost, "address", env.StringFromEnv("ARGOCD_SERVER_LISTEN_ADDRESS", common.DefaultAddressAPIServer), "Listen on given address")
command.Flags().IntVar(&listenPort, "port", common.DefaultPortAPIServer, "Listen on given port")
command.Flags().StringVar(&metricsHost, env.StringFromEnv("ARGOCD_SERVER_METRICS_LISTEN_ADDRESS", "metrics-address"), common.DefaultAddressAPIServerMetrics, "Listen for metrics on given address")

@@ -1,6 +0,0 @@
package commands

const (
// cliName is the name of the CLI
cliName = "argocd-server"
)
@@ -183,9 +183,9 @@ func getAdditionalNamespaces(ctx context.Context, configMapsClient dynamic.Resou
namespacesListFromString := func(namespaces string) []string {
listOfNamespaces := []string{}

ss := strings.Split(namespaces, ",")
ss := strings.SplitSeq(namespaces, ",")

for _, namespace := range ss {
for namespace := range ss {
if namespace != "" {
listOfNamespaces = append(listOfNamespaces, strings.TrimSpace(namespace))
}

@@ -10,8 +10,8 @@ import (
"sort"
"time"

"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/argoproj/argo-cd/gitops-engine/pkg/health"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -3,9 +3,9 @@ package admin
import (
"testing"

clustermocks "github.com/argoproj/gitops-engine/pkg/cache/mocks"
"github.com/argoproj/gitops-engine/pkg/health"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
clustermocks "github.com/argoproj/argo-cd/gitops-engine/pkg/cache/mocks"
"github.com/argoproj/argo-cd/gitops-engine/pkg/health"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

@@ -8,7 +8,7 @@ import (
"strings"
"time"

"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -7,7 +7,7 @@ import (
"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/v3/util/security"

"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -84,7 +84,7 @@ func newAppProject() *unstructured.Unstructured {
Server: "*",
},
},
ClusterResourceWhitelist: []metav1.GroupKind{
ClusterResourceWhitelist: []v1alpha1.ClusterResourceRestrictionItem{
{
Group: "*",
Kind: "*",
@@ -295,7 +295,8 @@ spec:
spec:
destination: {}
project: ""
status: {}
status:
health: {}
---
`,
},
@@ -325,7 +326,8 @@ spec:
spec:
destination: {}
project: ""
status: {}
status:
health: {}
---
`,
},

@@ -10,7 +10,7 @@ import (
"text/tabwriter"
"time"

"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -128,12 +128,9 @@ func loadClusters(ctx context.Context, kubeClient kubernetes.Interface, appClien

batchSize := 10
batchesCount := int(math.Ceil(float64(len(clusters)) / float64(batchSize)))
for batchNum := 0; batchNum < batchesCount; batchNum++ {
for batchNum := range batchesCount {
batchStart := batchSize * batchNum
batchEnd := batchSize * (batchNum + 1)
if batchEnd > len(clustersList.Items) {
batchEnd = len(clustersList.Items)
}
batchEnd := min((batchSize * (batchNum + 1)), len(clustersList.Items))
batch := clustersList.Items[batchStart:batchEnd]
_ = kube.RunAllAsync(len(batch), func(i int) error {
clusterShard := 0

@@ -60,8 +60,7 @@ func Test_loadClusters(t *testing.T) {
require.NoError(t, err)
for i := range clusters {
// This changes, nil it to avoid testing it.
//nolint:staticcheck
clusters[i].ConnectionState.ModifiedAt = nil
clusters[i].Info.ConnectionState.ModifiedAt = nil
}

expected := []ClusterWithInfo{{
@@ -69,11 +68,13 @@ func Test_loadClusters(t *testing.T) {
ID: "",
Server: "https://kubernetes.default.svc",
Name: "in-cluster",
ConnectionState: v1alpha1.ConnectionState{
Status: "Successful",
Info: v1alpha1.ClusterInfo{
ConnectionState: v1alpha1.ConnectionState{
Status: "Successful",
},
ServerVersion: ".",
},
ServerVersion: ".",
Shard: ptr.To(int64(0)),
Shard: ptr.To(int64(0)),
},
Namespaces: []string{"test"},
}}

@@ -7,7 +7,7 @@ import (
"io"
"os"

"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/yaml"

@@ -17,7 +17,7 @@ import (
utilio "github.com/argoproj/argo-cd/v3/util/io"
"github.com/argoproj/argo-cd/v3/util/templates"

"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/argoproj/argo-cd/gitops-engine/pkg/utils/kube"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"

@@ -23,7 +23,7 @@ func generateRandomPassword() (string, error) {
const initialPasswordLength = 16
const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-"
randBytes := make([]byte, initialPasswordLength)
for i := 0; i < initialPasswordLength; i++ {
for i := range initialPasswordLength {
num, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
if err != nil {
return "", err

Some files were not shown because too many files have changed in this diff.