Compare commits: release-1. ... v1.1.2 (550 commits)
*[Commit table (Author / SHA1 / Date): 550 rows, 567fcc3314 through eab17ce9fb; only the SHA1 column survived in this view, so the individual rows are omitted.]*
**.circleci/config.yml** (new file, 356 lines)

```yaml
version: 2.1
commands:
  before:
    steps:
      - restore_go_cache
      - install_golang
      - install_tools
      - clean_checkout
      - configure_git
      - install_go_deps
      - dep_ensure
  configure_git:
    steps:
      - run:
          name: Configure Git
          command: |
            set -x
            # must be configured for tests to run
            git config --global user.email you@example.com
            git config --global user.name "Your Name"
            echo "export PATH=/home/circleci/.go_workspace/src/github.com/argoproj/argo-cd/hack:\$PATH" | tee -a $BASH_ENV
            echo "export GIT_ASKPASS=git-ask-pass.sh" | tee -a $BASH_ENV
      - run:
          name: Make sure we can clone out the test private repo
          command: |
            set -x
            export GIT_USERNAME=blah
            export GIT_PASSWORD=B5sBDeoqAVUouoHkrovy
            git-ask-pass.sh Username
            git-ask-pass.sh Password
            git clone https://gitlab.com/argo-cd-test/test-apps.git /tmp/test-apps
  clean_checkout:
    steps:
      - run:
          name: Remove checked out code
          command: rm -Rf /home/circleci/.go_workspace/src/github.com/argoproj/argo-cd
      - checkout
  install_go_deps:
    steps:
      - run:
          name: Install Go deps
          command: |
            set -x
            go get github.com/gobuffalo/packr/packr
            go get github.com/gogo/protobuf/gogoproto
            go get github.com/golang/protobuf/protoc-gen-go
            go get github.com/golangci/golangci-lint/cmd/golangci-lint
            go get github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
            go get github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
            go get github.com/jstemmer/go-junit-report
            go get github.com/mattn/goreman
            go get golang.org/x/tools/cmd/goimports
  dep_ensure:
    steps:
      - restore_cache:
          keys:
            - vendor-v4-{{ checksum "Gopkg.lock" }}
      - run:
          name: Run dep ensure
          command: dep ensure -v
      - save_cache:
          key: vendor-v4-{{ checksum "Gopkg.lock" }}
          paths:
            - vendor
  install_golang:
    steps:
      - run:
          name: Install Golang v1.11.4
          command: |
            go get golang.org/dl/go1.11.4
            [ -e /home/circleci/sdk/go1.11.4 ] || go1.11.4 download
            echo "export GOPATH=/home/circleci/.go_workspace" | tee -a $BASH_ENV
            echo "export PATH=/home/circleci/sdk/go1.11.4/bin:\$PATH" | tee -a $BASH_ENV
      - run:
          name: Golang diagnostics
          command: |
            env
            which go
            go version
            go env
  install_tools:
    steps:
      - run:
          name: Create downloads dir
          command: mkdir -p /tmp/dl
      - restore_cache:
          keys:
            - dl-v4
            - dl-v3
      - run:
          name: Install JQ v1.6
          command: |
            set -x
            [ -e /tmp/dl/jq ] || curl -sLf -C - -o /tmp/dl/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
            sudo cp /tmp/dl/jq /usr/local/bin/jq
            sudo chmod +x /usr/local/bin/jq
            jq --version
      - run:
          name: Install Kubectx v0.6.3
          command: |
            set -x
            [ -e /tmp/dl/kubectx.zip ] || curl -sLf -C - -o /tmp/dl/kubectx.zip https://github.com/ahmetb/kubectx/archive/v0.6.3.zip
            sudo unzip /tmp/dl/kubectx.zip kubectx-0.6.3/kubectx
            sudo unzip /tmp/dl/kubectx.zip kubectx-0.6.3/kubens
            sudo mv kubectx-0.6.3/kubectx /usr/local/bin/
            sudo mv kubectx-0.6.3/kubens /usr/local/bin/
            sudo chmod +x /usr/local/bin/kubectx
            sudo chmod +x /usr/local/bin/kubens
      - run:
          name: Install Dep v0.5.3
          command: |
            set -x
            [ -e /tmp/dl/dep ] || curl -sLf -C - -o /tmp/dl/dep https://github.com/golang/dep/releases/download/v0.5.3/dep-linux-amd64
            sudo cp /tmp/dl/dep /usr/local/go/bin/dep
            sudo chmod +x /usr/local/go/bin/dep
            dep version
      - run:
          name: Install Go Swagger v0.19.0
          command: |
            set -x
            [ -e /tmp/dl/swagger ] || curl -sLf -C - -o /tmp/dl/swagger https://github.com/go-swagger/go-swagger/releases/download/v0.19.0/swagger_linux_amd64
            sudo cp /tmp/dl/swagger /usr/local/bin/swagger
            sudo chmod +x /usr/local/bin/swagger
            swagger version
      - run:
          name: Install Ksonnet v0.13.1
          command: |
            set -x
            [ -e /tmp/dl/ks.tar.gz ] || curl -sLf -C - -o /tmp/dl/ks.tar.gz https://github.com/ksonnet/ksonnet/releases/download/v0.13.1/ks_0.13.1_linux_amd64.tar.gz
            tar -C /tmp -xf /tmp/dl/ks.tar.gz
            sudo cp /tmp/ks_0.13.1_linux_amd64/ks /usr/local/go/bin/ks
            sudo chmod +x /usr/local/go/bin/ks
            ks version
      - run:
          name: Install Helm v2.13.1
          command: |
            set -x
            [ -e /tmp/dl/helm.tar.gz ] || curl -sLf -C - -o /tmp/dl/helm.tar.gz https://storage.googleapis.com/kubernetes-helm/helm-v2.13.1-linux-amd64.tar.gz
            tar -C /tmp/ -xf /tmp/dl/helm.tar.gz
            sudo cp /tmp/linux-amd64/helm /usr/local/go/bin/helm
            helm version --client
            helm init --client-only
      - run:
          name: Install Kustomize v1.0.11
          command: |
            set -x
            [ -e /tmp/dl/kustomize1 ] || curl -sLf -C - -o /tmp/dl/kustomize1 https://github.com/kubernetes-sigs/kustomize/releases/download/v1.0.11/kustomize_1.0.11_linux_amd64
            sudo cp /tmp/dl/kustomize1 /usr/local/go/bin/
            sudo chmod +x /usr/local/go/bin/kustomize1
            kustomize1 version
      - run:
          name: Install Kustomize v2.0.3
          command: |
            set -x
            [ -e /tmp/dl/kustomize ] || curl -sLf -C - -o /tmp/dl/kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v2.0.3/kustomize_2.0.3_linux_amd64
            sudo cp /tmp/dl/kustomize /usr/local/go/bin/
            sudo chmod +x /usr/local/go/bin/kustomize
            kustomize version
      - run:
          name: Install Protobuf compiler v3.7.1
          command: |
            set -x
            [ -e /tmp/dl/protoc.zip ] || curl -sLf -C - -o /tmp/dl/protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protoc-3.7.1-linux-x86_64.zip
            sudo unzip /tmp/dl/protoc.zip bin/protoc -d /usr/local/
            sudo chmod +x /usr/local/bin/protoc
            sudo unzip /tmp/dl/protoc.zip include/* -d /usr/local/
            protoc --version
      - save_cache:
          key: dl-v4
          paths:
            - /tmp/dl
  save_go_cache:
    steps:
      - save_cache:
          key: go-v15-{{ .Branch }}
          paths:
            - /home/circleci/.go_workspace
            - /home/circleci/.cache/go-build
            - /home/circleci/sdk/go1.11.4
  restore_go_cache:
    steps:
      - restore_cache:
          keys:
            - go-v15-{{ .Branch }}
            - go-v15-master
jobs:
  build:
    working_directory: /home/circleci/.go_workspace/src/github.com/argoproj/argo-cd
    machine:
      image: circleci/classic:201808-01
    steps:
      - before
      - run:
          name: Run unit tests
          command: |
            set -x
            mkdir -p /tmp/test-results
            trap "go-junit-report </tmp/test-results/go-test.out > /tmp/test-results/go-test-report.xml" EXIT
            make test | tee /tmp/test-results/go-test.out
      - save_go_cache
      - run:
          name: Uploading code coverage
          command: bash <(curl -s https://codecov.io/bash) -f coverage.out
          # This takes 2m, let's background it.
          background: true
      - store_test_results:
          path: /tmp/test-results
      - run:
          name: Generate code
          command: make codegen
      - run:
          name: Lint code
          # use GOGC to limit memory usage in exchange for CPU usage, https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
          # we have 8GB RAM, 2 CPUs https://circleci.com/docs/2.0/executor-types/#using-machine
          command: LINT_GOGC=50 LINT_CONCURRENCY=2 make lint
      - run:
          name: Check nothing has changed
          command: |
            set -xo pipefail
            # This makes sure you ran `make pre-commit` before you pushed.
            # We exclude the Swagger resources; CircleCI doesn't generate them correctly.
            # When this fails, it will create a patch file you can apply locally to fix it.
            # To troubleshoot builds: https://argoproj.github.io/argo-cd/developer-guide/ci/
            git diff --exit-code -- . ':!Gopkg.lock' ':!assets/swagger.json' ':!pkg/apis/api-rules/violation_exceptions.list' ':!pkg/apis/application/v1alpha1/openapi_generated.go' | tee codegen.patch
      - store_artifacts:
          path: codegen.patch
          when: always
  e2e:
    working_directory: /home/circleci/.go_workspace/src/github.com/argoproj/argo-cd
    machine:
      image: circleci/classic:201808-01
    steps:
      - run:
          name: Install and start K3S v0.5.0
          command: |
            curl -sfL https://get.k3s.io | sh -
            sudo chmod -R a+rw /etc/rancher/k3s
            kubectl version
          background: true
          environment:
            INSTALL_K3S_EXEC: --docker
            INSTALL_K3S_VERSION: v0.5.0
      - before
      - run:
          # do this before we build everything else in the background, as they tend to explode
          name: Make CLI
          command: |
            set -x
            make cli
            # must be added to path for tests
            echo export PATH="`pwd`/dist:\$PATH" | tee -a $BASH_ENV
      - run:
          name: Create namespace
          command: |
            set -x
            kubectl create ns argocd-e2e
            kubens argocd-e2e
            # install the certificates (not 100% sure we need this)
            sudo cp /var/lib/rancher/k3s/server/tls/token-ca.crt /usr/local/share/ca-certificates/k3s.crt
            sudo update-ca-certificates
            # create the kubecfg, again - not sure we need this
            cat /etc/rancher/k3s/k3s.yaml | sed "s/localhost/`hostname`/" | tee ~/.kube/config
            echo "127.0.0.1 `hostname`" | sudo tee -a /etc/hosts
      - run:
          name: Apply manifests
          command: kustomize build test/manifests/base | kubectl apply -f -
      - run:
          name: Start Redis
          command: docker run --rm --name argocd-redis -i -p 6379:6379 redis:5.0.3-alpine --save "" --appendonly no
          background: true
      - run:
          name: Start repo server
          command: go run ./cmd/argocd-repo-server/main.go --loglevel debug --redis localhost:6379
          background: true
          environment:
            # pft. if you do not quote "true", CircleCI turns it into "1", stoopid
            ARGOCD_FAKE_IN_CLUSTER: "true"
      - run:
          name: Start API server
          command: go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:6379 --insecure --dex-server http://localhost:5556 --repo-server localhost:8081 --staticassets ../argo-cd-ui/dist/app
          background: true
          environment:
            ARGOCD_FAKE_IN_CLUSTER: "true"
      - run:
          name: Wait for API server
          command: |
            set -x
            until curl -v http://localhost:8080/healthz; do sleep 3; done
      - run:
          name: Start controller
          command: go run ./cmd/argocd-application-controller/main.go --loglevel debug --redis localhost:6379 --repo-server localhost:8081 --kubeconfig ~/.kube/config
          background: true
          environment:
            ARGOCD_FAKE_IN_CLUSTER: "true"
      - run:
          name: Smoke test
          command: |
            set -x
            argocd login localhost:8080 --plaintext --username admin --password password
            argocd app create guestbook --dest-namespace default --dest-server https://kubernetes.default.svc --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook
            argocd app sync guestbook
            argocd app delete guestbook
      - run:
          name: Run e2e tests
          command: |
            set -x
            mkdir -p /tmp/test-results
            trap "go-junit-report </tmp/test-results/go-e2e.out > /tmp/test-results/go-e2e-report.xml" EXIT
            make test-e2e | tee /tmp/test-results/go-e2e.out
          environment:
            ARGOCD_OPTS: "--server localhost:8080 --plaintext"
            ARGOCD_E2E_EXPECT_TIMEOUT: "30"
            ARGOCD_E2E_K3S: "true"
      - store_test_results:
          path: /tmp/test-results
  ui:
    # note that we checkout the code in ~/argo-cd/, but then work in ~/argo-cd/ui
    working_directory: ~/argo-cd/ui
    docker:
      - image: node:11.15.0
    steps:
      - checkout:
          path: ~/argo-cd/
      - restore_cache:
          name: Restore Yarn Package Cache
          keys:
            - yarn-packages-v3-{{ checksum "yarn.lock" }}
      - run:
          name: Install
          command: yarn install --frozen-lockfile --ignore-optional --non-interactive
      - save_cache:
          name: Save Yarn Package Cache
          key: yarn-packages-v3-{{ checksum "yarn.lock" }}
          paths:
            - ~/.cache/yarn
            - node_modules
      - run:
          name: Test
          command: yarn test
      # This does not appear to work, and I don't want to spend time on it.
      - store_test_results:
          path: junit.xml
      - run:
          name: Lint
          command: yarn lint
workflows:
  version: 2
  workflow:
    jobs:
      - build
      - e2e
      - ui:
          # this isn't strictly true, we just put it here so that we use 2/4 executors rather than 3/4
          requires:
            - build
```
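A quick local sanity check before pushing changes to a config this size; a minimal sketch, assuming the CircleCI CLI (`circleci`) is installed:

```bash
# Validate the YAML and the 2.1 command/job structure before pushing.
circleci config validate .circleci/config.yml

# Expand the reusable 2.1 commands into the flat config that jobs will
# actually run, to inspect what `before`, `dep_ensure`, etc. turn into.
circleci config process .circleci/config.yml > /tmp/processed.yml
```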
*(unnamed file)*

```diff
@@ -5,3 +5,12 @@ ignore:
 - "pkg/apis/.*"
 - "pkg/client/.*"
 - "test/.*"
+coverage:
+  status:
+    # allow test coverage to drop by 0.1%, assume that it's typically due to CI problems
+    patch:
+      default:
+        threshold: 0.1
+    project:
+      default:
+        threshold: 0.1
```
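The hunk above looks like a codecov configuration; codecov exposes a validation endpoint, so a change like this can be checked before merging (this assumes the file lives at the repo root as `codecov.yml`):

```bash
# POST the config to codecov's validator; a successful response echoes
# the parsed configuration, including the 0.1 thresholds.
curl --data-binary @codecov.yml https://codecov.io/validate
```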
*(unnamed file)*

```diff
@@ -10,3 +10,4 @@ dist/
 cmd/**/debug
 debug.test
 coverage.out
+ui/node_modules/
```
**.github/ISSUE_TEMPLATE/bug_report.md** (22 lines changed, vendored)

````diff
@@ -11,7 +11,7 @@ assignees: ''
 A clear and concise description of what the bug is.
 
 **To Reproduce**
-Steps to reproduce the behavior:
+If we cannot reproduce, we cannot fix! Steps to reproduce the behavior:
 1. Go to '...'
 2. Click on '....'
 3. Scroll down to '....'
@@ -23,5 +23,21 @@ A clear and concise description of what you expected to happen.
 **Screenshots**
 If applicable, add screenshots to help explain your problem.
 
 **Additional context**
 Add any other context about the problem here.
+
+**Version**
+
+```shell
+Paste the output from `argocd version` here.
+```
+
+**Logs**
+
+```
+Paste any relevant application logs here.
+```
+
+**Have you thought about contributing a fix yourself?**
+
+Open Source software thrives with your contribution. It not only gives skills you might not be able to get in your day job, it also looks amazing on your resume.
+
+If you want to get involved, check out the
+[contributing guide](https://github.com/argoproj/argo-cd/blob/master/docs/CONTRIBUTING.md), then reach out to us on [Slack](https://argoproj.github.io/community/join-slack) so we can see how to get you started.
````
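For reporters filling in the new **Version** and **Logs** sections, a sketch of collecting both (the namespace and deployment names assume a stock Argo CD install):

```bash
# CLI and server versions for the **Version** section.
argocd version

# Recent component logs for the **Logs** section; assumes the default
# "argocd" namespace and standard deployment names.
kubectl -n argocd logs deploy/argocd-server --tail=100
kubectl -n argocd logs deploy/argocd-application-controller --tail=100
```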
**.github/ISSUE_TEMPLATE/feature_request.md** (9 lines changed, vendored)

```diff
@@ -13,8 +13,9 @@ A clear and concise description of what the problem is. Ex. I'm always frustrate
 **Describe the solution you'd like**
 A clear and concise description of what you want to happen.
 
 **Describe alternatives you've considered**
 A clear and concise description of any alternative solutions or features you've considered.
+**Have you thought about contributing yourself?**
 
-**Additional context**
-Add any other context or screenshots about the feature request here.
+Open Source software thrives with your contribution. It not only gives skills you might not be able to get in your day job, it also looks amazing on your resume.
+
+If you want to get involved, check out the
+[contributing guide](https://github.com/argoproj/argo-cd/blob/master/docs/CONTRIBUTING.md), then reach out to us on [Slack](https://argoproj.github.io/community/join-slack) so we can see how to get you started.
```
**.github/pull_request_template.md** (new file, 7 lines, vendored)

```markdown
<!--
Thank you for submitting your PR!

We'd love your organisation to be listed in the [README](https://github.com/argoproj/argo-cd). Don't forget to add it if you can!

To troubleshoot builds: https://argoproj.github.io/argo-cd/developer-guide/ci/
-->
```
*(unnamed file)*

```diff
@@ -1,5 +1,5 @@
 run:
-  deadline: 8m
+  deadline: 2m
   skip-files:
   - ".*\\.pb\\.go"
   skip-dirs:
```
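With the deadline dropped to 2m, lint runs need to stay fast and lean; a local invocation mirroring the CI lint step (the flags are the ones the Makefile's `lint` target uses later in this diff):

```bash
# Low GOGC trades CPU for a smaller golangci-lint heap; --concurrency 2
# matches the 2-CPU CircleCI machine executor.
GOGC=50 golangci-lint run --fix --verbose --concurrency 2
```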
**CHANGELOG.md** (50 lines changed)

```diff
@@ -1,38 +1,36 @@
 # Changelog
 
-## v1.0.0
+## v1.0.0 (2019-05-16)
 
 ### New Features
 
 #### Network View
 
-TODO
+A new way to visualize application resources has been introduced to the Application Details page. The Network View visualizes connections between Ingresses, Services and Pods
+based on ingress reference service, service's label selectors and labels. The new view is useful to understand the application traffic flow and troubleshoot connectivity issues.
 
 #### Custom Actions
 
 Argo CD introduces Custom Resource Actions to allow users to provide their own Lua scripts to modify existing Kubernetes resources in their applications. These actions are exposed in the UI to allow easy, safe, and reliable changes to their resources. This functionality can be used to introduce functionality such as suspending and enabling a Kubernetes cronjob, continuing a BlueGreen deployment with Argo Rollouts, or scaling a deployment.
 
-#### UI Enhancements
+#### UI & Usability Enhancements
 
 * New color palette intended to highlight unhealthy and out-of-sync resources more clearly.
 * The health of more resources is displayed, so it is easier to quickly zoom to unhealthy pods, replica-sets, etc.
 * Resources that do not have health no longer appear to be healthy.
+* Support for configuring Git repo credentials at a domain/org level
+* Support for configuring requested OIDC provider scopes and enforced RBAC scopes
+* Support for configuring monitored resources whitelist in addition to excluded resources
 
 ### Breaking Changes
 
 * Remove deprecated componentParameterOverrides field #1372
 
-### Changes since v0.12.1
-
-#### New Features
-
-+ Issue #357 - Expose application nodes networking information (#1333)
-+ Support for customizable resource actions as Lua scripts #86
-+ Surface Service/Ingress external IPs, hostname to application #908
-+ Update argocd-util import/export to support proper backup and restore (#1328)
+### Changes since v0.12.2
+
+#### Enhancements
 
 * `argocd app wait` should have `--resource` flag like sync #1206
 * Adds support for `kustomize edit set image`. Closes #1275 (#1324)
 * Allow wait to return on health or suspended (#1392)
 * Application warning when a manifest is defined twice #1070
@@ -41,31 +39,49 @@ Argo CD introduces Custom Resource Actions to allow users to provide their own L
 * Display number of errors on resource tab #1477
 * Displays resources that are being deleted as "Progressing". Closes #1410 (#1426)
 * Generate random name for grpc proxy unix socket file instead of time stamp (#1455)
 * Issue #357 - Expose application nodes networking information (#1333)
 * Issue #1404 - App controller unnecessary set namespace to cluster level resources (#1405)
 * Nils health if the resource does not provide it. Closes #1383 (#1408)
 * Perform health assessments on all resource nodes in the tree. Closes #1382 (#1422)
 * Remove deprecated componentParameterOverrides field #1372
 * Shows the health of the application. Closes #1433 (#1434)
 * Surface Service/Ingress external IPs, hostname to application #908
 * Surface pod status to tree view #1358
 * Support for customizable resource actions as Lua scripts #86
 * UI / API Errors Truncated, Time Out #1386
 * UI Enhancement Proposals Quick Wins #1274
 * Update argocd-util import/export to support proper backup and restore (#1328)
 * Whitelisting repos/clusters in projects should consider repo/cluster permissions #1432
+* Adds support for configuring repo creds at a domain/org level. (#1332)
+* Implement whitelist option analogous to `resource.exclusions` (#1490)
+* Added ability to sync specific labels from the command line (#1241)
+* Improve rendering app image information (#1552)
+* Add liveness probe to repo server/api servers (#1546)
+* Support configuring requested OIDC provider scopes and enforced RBAC scopes (#1471)
 
 #### Bug Fixes
 
 - "bind: address already in use" after switching to gRPC-Web #1451
 - Annoying warning while using `--grpc-web` flag #1420
 - Don't compare secrets in the CLI, since argo-cd doesn't have access to their data (#1459)
 - Dropdown menu should not have sync item for unmanaged resources #1357
 - Fixes goroutine leak. Closes #1381 (#1457)
 - Improve input style #1217
 - Issue #1389 - Fix null pointer exception in secret normalization function (#1428)
 - Issue #1425 - Argo CD should not delete CRDs (#1428)
 - Issue #1446 - Delete helm temp directories (#1449)
 - Issue #908 - Surface Service/Ingress external IPs, hostname to application (#1347)
 - kustomization fields are all mandatory #1504
 - Resource node details is crashing if live resource is missing #1505
 - Rollback UI is not showing correct ksonnet parameters in preview #1326
 - See details of applications fails with "r.nodes is undefined" #1371
-- Unable to create app from private repo: x509: certificate signed by unknown authority #1171
 - UI fails to load custom actions if resource is not deployed #1502
+- Unable to create app from private repo: x509: certificate signed by unknown authority (#1171)
+- Fix hardcoded 'git' user in `util/git.NewClient` (#1555)
+- Application controller becomes unresponsive (#1476)
+- Load target resource using K8S if conversion fails (#1414)
+- Can't ignore a non-existent pointer anymore (#1586)
+- Impossible to sync to HEAD from UI if auto-sync is enabled (#1579)
+- Application controller is unable to delete self-referenced app (#1570)
+- Prevent reconciliation loop for self-managed apps (#1533)
+- Controller incorrectly reports health state of self-managed application (#1557)
+- Fix kustomize manifest generation crash if manifest has image without version (#1540)
+- Supply resourceVersion to watch request to prevent reading of stale cache (#1605)
 
 ## v0.12.2 (2019-04-22)
```
*(unnamed file)*

```diff
@@ -1,3 +1,4 @@
+ARG BASE_IMAGE=debian:9.5-slim
 ####################################################################################################
 # Builder image
 # Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image
@@ -93,7 +94,9 @@ RUN cd ${GOPATH}/src/dummy && \
 ####################################################################################################
 # Argo CD Base - used as the base for both the release and dev argocd images
 ####################################################################################################
-FROM debian:9.5-slim as argocd-base
+FROM $BASE_IMAGE as argocd-base
 
+USER root
+
 RUN groupadd -g 999 argocd && \
     useradd -r -u 999 -g argocd argocd && \
```
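The new `ARG BASE_IMAGE` makes the runtime base swappable at build time without editing the Dockerfile; for example (the tags here are illustrative):

```bash
# Default build still uses debian:9.5-slim.
docker build -t argocd:local .

# Override the runtime base image via the new build arg.
docker build --build-arg BASE_IMAGE=debian:9.5 -t argocd:local-full .
```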
**Gopkg.lock** (175 lines changed, generated)

```diff
@@ -70,14 +70,15 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:e8ec0abbf32fdcc9f7eb14c0656c1d0fc2fc7ec8f60dff4b7ac080c50afd8e49"
+  digest = "1:4f6afcf4ebe041b3d4aa7926d09344b48d2f588e1f957526bbbe54f9cbb366a1"
   name = "github.com/argoproj/pkg"
   packages = [
     "exec",
+    "rand",
     "time",
   ]
   pruneopts = ""
-  revision = "88ab0e836a8e8c70bc297c5764669bd7da27afd1"
+  revision = "38dba6e98495680ff1f8225642b63db10a96bb06"
 
 [[projects]]
   digest = "1:d8a2bb36a048d1571bcc1aee208b61f39dc16c6c53823feffd37449dde162507"
@@ -417,14 +418,6 @@
   revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
   version = "v1.2.0"
 
-[[projects]]
-  branch = "master"
-  digest = "1:1e5b1e14524ed08301977b7b8e10c719ed853cbf3f24ecb66fae783a46f207a6"
-  name = "github.com/google/btree"
-  packages = ["."]
-  pruneopts = ""
-  revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
-
 [[projects]]
   digest = "1:14d826ee25139b4674e9768ac287a135f4e7c14e1134a5b15e4e152edfd49f41"
   name = "github.com/google/go-jsonnet"
@@ -472,17 +465,6 @@
   revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d"
   version = "v1.4.0"
 
-[[projects]]
-  branch = "master"
-  digest = "1:009a1928b8c096338b68b5822d838a72b4d8520715c1463614476359f3282ec8"
-  name = "github.com/gregjones/httpcache"
-  packages = [
-    ".",
-    "diskcache",
-  ]
-  pruneopts = ""
-  revision = "9cad4c3443a7200dd6400aef47183728de563a38"
-
 [[projects]]
   branch = "master"
   digest = "1:9dca8c981b8aed7448d94e78bc68a76784867a38b3036d5aabc0b32d92ffd1f4"
@@ -680,22 +662,6 @@
   revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
   version = "v0.2.0"
 
-[[projects]]
-  branch = "master"
-  digest = "1:c24598ffeadd2762552269271b3b1510df2d83ee6696c1e543a0ff653af494bc"
-  name = "github.com/petar/GoLLRB"
-  packages = ["llrb"]
-  pruneopts = ""
-  revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
-
-[[projects]]
-  digest = "1:b46305723171710475f2dd37547edd57b67b9de9f2a6267cafdd98331fd6897f"
-  name = "github.com/peterbourgon/diskv"
-  packages = ["."]
-  pruneopts = ""
-  revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
-  version = "v2.0.1"
-
 [[projects]]
   digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
   name = "github.com/pkg/errors"
@@ -1033,6 +999,39 @@
   pruneopts = ""
   revision = "5e776fee60db37e560cee3fb46db699d2f095386"
 
+[[projects]]
+  branch = "master"
+  digest = "1:e9e4b928898842a138bc345d42aae33741baa6d64f3ca69b0931f9c7a4fd0437"
+  name = "gonum.org/v1/gonum"
+  packages = [
+    "blas",
+    "blas/blas64",
+    "blas/cblas128",
+    "blas/gonum",
+    "floats",
+    "graph",
+    "graph/internal/linear",
+    "graph/internal/ordered",
+    "graph/internal/set",
+    "graph/internal/uid",
+    "graph/iterator",
+    "graph/simple",
+    "graph/topo",
+    "graph/traverse",
+    "internal/asm/c128",
+    "internal/asm/c64",
+    "internal/asm/f32",
+    "internal/asm/f64",
+    "internal/cmplx64",
+    "internal/math32",
+    "lapack",
+    "lapack/gonum",
+    "lapack/lapack64",
+    "mat",
+  ]
+  pruneopts = ""
+  revision = "90b7154515874cee6c33cf56b29e257403a09a69"
+
 [[projects]]
   digest = "1:934fb8966f303ede63aa405e2c8d7f0a427a05ea8df335dfdc1833dd4d40756f"
   name = "google.golang.org/appengine"
@@ -1225,16 +1224,16 @@
   version = "v2.2.2"
 
 [[projects]]
-  branch = "release-1.12"
-  digest = "1:3e3e9df293bd6f9fd64effc9fa1f0edcd97e6c74145cd9ab05d35719004dc41f"
+  branch = "release-1.14"
+  digest = "1:d8a6f1ec98713e685346a2e4b46c6ec4a1792a5535f8b0dffe3b1c08c9d69b12"
   name = "k8s.io/api"
   packages = [
     "admission/v1beta1",
-    "admissionregistration/v1alpha1",
     "admissionregistration/v1beta1",
     "apps/v1",
     "apps/v1beta1",
     "apps/v1beta2",
+    "auditregistration/v1alpha1",
     "authentication/v1",
     "authentication/v1beta1",
     "authorization/v1",
@@ -1246,16 +1245,21 @@
     "batch/v1beta1",
     "batch/v2alpha1",
     "certificates/v1beta1",
+    "coordination/v1",
     "coordination/v1beta1",
     "core/v1",
     "events/v1beta1",
     "extensions/v1beta1",
     "imagepolicy/v1alpha1",
     "networking/v1",
+    "networking/v1beta1",
+    "node/v1alpha1",
+    "node/v1beta1",
     "policy/v1beta1",
     "rbac/v1",
     "rbac/v1alpha1",
     "rbac/v1beta1",
+    "scheduling/v1",
     "scheduling/v1alpha1",
     "scheduling/v1beta1",
     "settings/v1alpha1",
@@ -1264,7 +1268,7 @@
     "storage/v1beta1",
   ]
   pruneopts = ""
-  revision = "6db15a15d2d3874a6c3ddb2140ac9f3bc7058428"
+  revision = "40a48860b5abbba9aa891b02b32da429b08d96a0"
 
 [[projects]]
   branch = "master"
@@ -1273,14 +1277,13 @@
   packages = [
     "pkg/apis/apiextensions",
     "pkg/apis/apiextensions/v1beta1",
-    "pkg/features",
   ]
   pruneopts = ""
   revision = "7f7d2b94eca3a7a1c49840e119a8bc03c3afb1e3"
 
 [[projects]]
-  branch = "release-1.12"
-  digest = "1:5899da40e41bcc8c1df101b72954096bba9d85b763bc17efc846062ccc111c7b"
+  branch = "release-1.14"
+  digest = "1:a802c91b189a31200cfb66744441fe62dac961ec7c5c58c47716570de7da195c"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/equality",
@@ -1332,22 +1335,11 @@
     "third_party/forked/golang/reflect",
   ]
   pruneopts = ""
-  revision = "f71dbbc36e126f5a371b85f6cca96bc8c57db2b6"
+  revision = "6a84e37a896db9780c75367af8d2ed2bb944022e"
 
-[[projects]]
-  branch = "release-1.12"
-  digest = "1:b2c55ff9df6d053e40094b943f949c257c3f7dcdbb035c11487c93c96df9eade"
-  name = "k8s.io/apiserver"
-  packages = [
-    "pkg/features",
-    "pkg/util/feature",
-  ]
-  pruneopts = ""
-  revision = "5e1c1f41ee34b3bb153f928f8c91c2a6dd9482a9"
-
 [[projects]]
-  branch = "release-9.0"
-  digest = "1:77bf3d9f18ec82e08ac6c4c7e2d9d1a2ef8d16b25d3ff72fcefcf9256d751573"
+  branch = "release-11.0"
+  digest = "1:794140b3ac07405646ea3d4a57e1f6155186e672aed8aa0c996779381cd92fe6"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
@@ -1359,8 +1351,6 @@
     "kubernetes",
     "kubernetes/fake",
     "kubernetes/scheme",
-    "kubernetes/typed/admissionregistration/v1alpha1",
-    "kubernetes/typed/admissionregistration/v1alpha1/fake",
     "kubernetes/typed/admissionregistration/v1beta1",
     "kubernetes/typed/admissionregistration/v1beta1/fake",
     "kubernetes/typed/apps/v1",
@@ -1369,6 +1359,8 @@
     "kubernetes/typed/apps/v1beta1/fake",
    "kubernetes/typed/apps/v1beta2",
     "kubernetes/typed/apps/v1beta2/fake",
+    "kubernetes/typed/auditregistration/v1alpha1",
+    "kubernetes/typed/auditregistration/v1alpha1/fake",
     "kubernetes/typed/authentication/v1",
     "kubernetes/typed/authentication/v1/fake",
     "kubernetes/typed/authentication/v1beta1",
@@ -1391,6 +1383,8 @@
     "kubernetes/typed/batch/v2alpha1/fake",
     "kubernetes/typed/certificates/v1beta1",
     "kubernetes/typed/certificates/v1beta1/fake",
+    "kubernetes/typed/coordination/v1",
+    "kubernetes/typed/coordination/v1/fake",
     "kubernetes/typed/coordination/v1beta1",
     "kubernetes/typed/coordination/v1beta1/fake",
     "kubernetes/typed/core/v1",
@@ -1401,6 +1395,12 @@
     "kubernetes/typed/extensions/v1beta1/fake",
     "kubernetes/typed/networking/v1",
     "kubernetes/typed/networking/v1/fake",
+    "kubernetes/typed/networking/v1beta1",
+    "kubernetes/typed/networking/v1beta1/fake",
+    "kubernetes/typed/node/v1alpha1",
+    "kubernetes/typed/node/v1alpha1/fake",
+    "kubernetes/typed/node/v1beta1",
+    "kubernetes/typed/node/v1beta1/fake",
     "kubernetes/typed/policy/v1beta1",
     "kubernetes/typed/policy/v1beta1/fake",
     "kubernetes/typed/rbac/v1",
@@ -1409,6 +1409,8 @@
     "kubernetes/typed/rbac/v1alpha1/fake",
     "kubernetes/typed/rbac/v1beta1",
     "kubernetes/typed/rbac/v1beta1/fake",
+    "kubernetes/typed/scheduling/v1",
+    "kubernetes/typed/scheduling/v1/fake",
     "kubernetes/typed/scheduling/v1alpha1",
     "kubernetes/typed/scheduling/v1alpha1/fake",
     "kubernetes/typed/scheduling/v1beta1",
@@ -1445,23 +1447,22 @@
     "tools/remotecommand",
     "transport",
     "transport/spdy",
-    "util/buffer",
     "util/cert",
     "util/connrotation",
     "util/exec",
     "util/flowcontrol",
     "util/homedir",
-    "util/integer",
     "util/jsonpath",
+    "util/keyutil",
     "util/retry",
     "util/workqueue",
   ]
   pruneopts = ""
-  revision = "13596e875accbd333e0b5bd5fd9462185acd9958"
+  revision = "11646d1007e006f6f24995cb905c68bc62901c81"
 
 [[projects]]
-  branch = "release-1.12"
-  digest = "1:8108815d1aef9159daabdb3f0fcef04a88765536daf0c0cd29a31fdba135ee54"
+  branch = "release-1.14"
+  digest = "1:742ce70d2c6de0f02b5331a25d4d549f55de6b214af22044455fd6e6b451cad9"
   name = "k8s.io/code-generator"
   packages = [
     "cmd/go-to-protobuf",
@@ -1470,7 +1471,7 @@
     "third_party/forked/golang/reflect",
   ]
   pruneopts = ""
-  revision = "b1289fc74931d4b6b04bd1a259acfc88a2cb0a66"
+  revision = "50b561225d70b3eb79a1faafd3dfe7b1a62cbe73"
 
 [[projects]]
   branch = "master"
@@ -1488,15 +1489,15 @@
   revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985"
 
 [[projects]]
-  digest = "1:4f5eb833037cc0ba0bf8fe9cae6be9df62c19dd1c869415275c708daa8ccfda5"
+  digest = "1:9eaf86f4f6fb4a8f177220d488ef1e3255d06a691cca95f14ef085d4cd1cef3c"
   name = "k8s.io/klog"
   packages = ["."]
   pruneopts = ""
-  revision = "a5bc97fbc634d635061f3146511332c7e313a55a"
-  version = "v0.1.0"
+  revision = "d98d8acdac006fb39831f1b25640813fef9c314f"
+  version = "v0.3.3"
 
 [[projects]]
-  digest = "1:324305490fa0113cac244a48e648bf8a566f0a6e5e25c498e8c033a0b4e23c96"
+  digest = "1:42ea993b351fdd39b9aad3c9ebe71f2fdb5d1f8d12eed24e71c3dff1a31b2a43"
   name = "k8s.io/kube-openapi"
   packages = [
     "cmd/openapi-gen",
@@ -1508,10 +1509,11 @@
     "pkg/util/sets",
   ]
   pruneopts = ""
-  revision = "master"
+  revision = "411b2483e5034420675ebcdd4a55fc76fe5e55cf"
 
 [[projects]]
-  digest = "1:6061aa42761235df375f20fa4a1aa6d1845cba3687575f3adb2ef3f3bc540af5"
+  branch = "release-1.14"
+  digest = "1:78aa6079e011ece0d28513c7fe1bd64284fa9eb5d671760803a839ffdf0e9e38"
   name = "k8s.io/kubernetes"
   packages = [
     "pkg/api/v1/pod",
@@ -1519,19 +1521,25 @@
     "pkg/apis/autoscaling",
     "pkg/apis/batch",
     "pkg/apis/core",
+    "pkg/apis/extensions",
     "pkg/apis/networking",
+    "pkg/apis/policy",
     "pkg/features",
     "pkg/kubectl/scheme",
     "pkg/kubectl/util/term",
     "pkg/kubelet/apis",
     "pkg/util/interrupt",
     "pkg/util/node",
   ]
   pruneopts = ""
-  revision = "17c77c7898218073f14c8d573582e8d2313dc740"
-  version = "v1.12.2"
+  revision = "2d20b5759406ded89f8b25cf085ff4733b144ba5"
 
 [[projects]]
   branch = "master"
   digest = "1:4c5d39f7ca1c940d7e74dbc62d2221e2c59b3d35c54f1fa9c77f3fd3113bbcb1"
   name = "k8s.io/utils"
   packages = [
+    "buffer",
+    "integer",
     "trace",
   ]
   pruneopts = ""
   revision = "c55fbcfc754a5b2ec2fbae8fb9dcac36bdba6a12"
@@ -1541,6 +1549,14 @@
   pruneopts = ""
   revision = "97fed8db84274c421dbfffbb28ec859901556b97"
 
+[[projects]]
+  digest = "1:321081b4a44256715f2b68411d8eda9a17f17ebfe6f0cc61d2cc52d11c08acfa"
+  name = "sigs.k8s.io/yaml"
+  packages = ["."]
+  pruneopts = ""
+  revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
+  version = "v1.1.0"
+
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
@@ -1671,6 +1687,7 @@
     "k8s.io/client-go/util/flowcontrol",
     "k8s.io/client-go/util/workqueue",
     "k8s.io/code-generator/cmd/go-to-protobuf",
+    "k8s.io/klog",
     "k8s.io/kube-openapi/cmd/openapi-gen",
     "k8s.io/kube-openapi/pkg/common",
     "k8s.io/kubernetes/pkg/api/v1/pod",
```
**Gopkg.toml** (24 lines changed)

```diff
@@ -35,16 +35,24 @@ required = [
   name = "github.com/prometheus/client_golang"
   revision = "7858729281ec582767b20e0d696b6041d995d5e0"
 
-[[constraint]]
-  branch = "release-1.12"
+[[override]]
+  branch = "release-1.14"
   name = "k8s.io/api"
 
-[[constraint]]
-  branch = "release-1.12"
+[[override]]
+  branch = "release-1.14"
   name = "k8s.io/kubernetes"
 
+[[override]]
+  branch = "release-1.14"
+  name = "k8s.io/code-generator"
+
-[[constraint]]
-  branch = "release-9.0"
+[[override]]
+  branch = "release-1.14"
   name = "k8s.io/apimachinery"
 
+[[override]]
+  branch = "release-11.0"
+  name = "k8s.io/client-go"
+
 [[constraint]]
@@ -63,6 +71,8 @@ required = [
   branch = "master"
   name = "github.com/yudai/gojsondiff"
 
+# TODO: move off of k8s.io/kube-openapi and use controller-tools for CRD spec generation
+# (override argoproj/argo constraint on master)
 [[override]]
-  revision = "master"
+  revision = "411b2483e5034420675ebcdd4a55fc76fe5e55cf"
   name = "k8s.io/kube-openapi"
```
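After editing `[[constraint]]`/`[[override]]` stanzas like the ones above, the lock file and vendor tree have to be re-solved; a sketch, assuming dep v0.5.x is on the PATH (the CI installs it in `install_tools`):

```bash
# Re-solve dependencies against the new overrides and re-vendor.
dep ensure -v

# Confirm Gopkg.lock and vendor/ now agree with Gopkg.toml.
dep status
```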
**Makefile** (39 lines changed)

```diff
@@ -1,4 +1,4 @@
-PACKAGE=github.com/argoproj/argo-cd
+PACKAGE=github.com/argoproj/argo-cd/common
 CURRENT_DIR=$(shell pwd)
 DIST_DIR=${CURRENT_DIR}/dist
 CLI_NAME=argocd
@@ -9,15 +9,19 @@ GIT_COMMIT=$(shell git rev-parse HEAD)
 GIT_TAG=$(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi)
 GIT_TREE_STATE=$(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
 PACKR_CMD=$(shell if [ "`which packr`" ]; then echo "packr"; else echo "go run vendor/github.com/gobuffalo/packr/packr/main.go"; fi)
-TEST_CMD=$(shell [ "`which gotestsum`" != "" ] && echo gotestsum -- || echo go test)
 
+PATH:=$(PATH):$(PWD)/hack
+
 # docker image publishing options
-DOCKER_PUSH=false
-IMAGE_TAG=latest
+DOCKER_PUSH?=false
+IMAGE_TAG?=latest
 # perform static compilation
-STATIC_BUILD=true
+STATIC_BUILD?=true
 # build development images
-DEV_IMAGE=false
+DEV_IMAGE?=false
+# lint is memory and CPU intensive, so we can limit on CI to mitigate OOM
+LINT_GOGC?=off
+LINT_CONCURRENCY?=8
 
 override LDFLAGS += \
 	-X ${PACKAGE}.version=${VERSION} \
@@ -60,7 +64,7 @@ clientgen:
 	./hack/update-codegen.sh
 
 .PHONY: codegen
-codegen: protogen clientgen openapigen
+codegen: protogen clientgen openapigen manifests
 
 .PHONY: cli
 cli: clean-debug
@@ -87,7 +91,7 @@ manifests:
 .PHONY: server
 server: clean-debug
 	CGO_ENABLED=0 ${PACKR_CMD} build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-server ./cmd/argocd-server
 
 
 .PHONY: repo-server
 repo-server:
 	CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argocd-repo-server ./cmd/argocd-repo-server
@@ -132,26 +136,33 @@ dep-ensure:
 
 .PHONY: lint
 lint:
-	golangci-lint run --fix
-	# golangci-lint does not do a good job of formatting imports
-	goimports -local github.com/argoproj/argo-cd -w `find . ! -path './vendor/*' ! -path './pkg/client/*' -type f -name '*.go'`
+	GOGC=$(LINT_GOGC) golangci-lint run --fix --verbose --concurrency $(LINT_CONCURRENCY)
 
 .PHONY: build
 build:
-	go build `go list ./... | grep -v resource_customizations`
+	go build -v `go list ./... | grep -v 'resource_customizations\|test/e2e'`
 
 .PHONY: test
 test:
-	$(TEST_CMD) -covermode=count -coverprofile=coverage.out `go list ./... | grep -v "github.com/argoproj/argo-cd/test/e2e"`
+	go test -v -covermode=count -coverprofile=coverage.out `go list ./... | grep -v "test/e2e"`
 
+.PHONY: cover
+cover:
+	go tool cover -html=coverage.out
+
 .PHONY: test-e2e
 test-e2e: cli
-	$(TEST_CMD) -v -failfast -timeout 20m ./test/e2e
+	go test -v -timeout 10m ./test/e2e
 
 .PHONY: start-e2e
 start-e2e: cli
+	killall goreman || true
+	kubectl create ns argocd-e2e || true
+	kubens argocd-e2e
+	kustomize build test/manifests/base | kubectl apply -f -
-	make start
+	goreman start
 
 # Cleans VSCode debug.test files from sub-dirs to prevent them from being included in packr boxes
 .PHONY: clean-debug
@@ -178,4 +189,4 @@ release-precheck: manifests
 	@if [ "$(GIT_TAG)" != "v`cat VERSION`" ]; then echo 'VERSION does not match git tag'; exit 1; fi
 
 .PHONY: release
-release: release-precheck precheckin image release-cli
+release: release-precheck pre-commit image release-cli
```
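Because the publishing knobs now use `?=`, they can be overridden from the environment instead of editing the Makefile; for example (the tag value here is illustrative):

```bash
# ?= only assigns when the variable is unset, so environment values win.
IMAGE_TAG=v1.1.2 DOCKER_PUSH=true make image

# Likewise for a leaner lint run, mirroring what CI does:
LINT_GOGC=50 LINT_CONCURRENCY=2 make lint
```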
**Procfile** (7 lines changed)

```diff
@@ -1,5 +1,6 @@
 controller: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-application-controller/main.go --loglevel debug --redis localhost:6379 --repo-server localhost:8081"
-api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:6379 --disable-auth --insecure --dex-server http://localhost:5556 --repo-server localhost:8081 --staticassets ../argo-cd-ui/dist/app"
-repo-server: sh -c "FORCE_LOG_COLORS=1 go run ./cmd/argocd-repo-server/main.go --loglevel debug --redis localhost:6379"
+api-server: sh -c "FORCE_LOG_COLORS=1 ARGOCD_FAKE_IN_CLUSTER=true go run ./cmd/argocd-server/main.go --loglevel debug --redis localhost:6379 --disable-auth --insecure --dex-server http://localhost:5556 --repo-server localhost:8081 --staticassets ui/dist/app"
 dex: sh -c "go run ./cmd/argocd-util/main.go gendexcfg -o `pwd`/dist/dex.yaml && docker run --rm -p 5556:5556 -v `pwd`/dist/dex.yaml:/dex.yaml quay.io/dexidp/dex:v2.14.0 serve /dex.yaml"
-redis: docker run --rm -i -p 6379:6379 redis:5.0.3-alpine --save "" --appendonly no
+redis: docker run --rm --name argocd-redis -i -p 6379:6379 redis:5.0.3-alpine --save "" --appendonly no
+repo-server: sh -c "FORCE_LOG_COLORS=1 go run ./cmd/argocd-repo-server/main.go --loglevel debug --redis localhost:6379"
+ui: sh -c 'cd ui && yarn start'
```
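The Procfile is consumed by goreman (a Go port of foreman, installed by the CI's `install_go_deps` step), so the whole dev stack can be started with:

```bash
# Install the Procfile runner, then launch all six processes:
# controller, api-server, dex, redis, repo-server and ui.
go get github.com/mattn/goreman
goreman start
```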
**README.md** (22 lines changed)

```diff
@@ -19,13 +19,23 @@ Application deployment and lifecycle management should be automated, auditable,
 Organizations below are **officially** using Argo CD. Please send a PR with your organization name if you are using Argo CD.
 
+1. [Codility](https://www.codility.com/)
+1. [Commonbond](https://commonbond.co/)
+1. [CyberAgent](https://www.cyberagent.co.jp/en/)
+1. [END.](https://www.endclothing.com/)
+1. [GMETRI](https://gmetri.com/)
 1. [Intuit](https://www.intuit.com/)
-2. [KompiTech GmbH](https://www.kompitech.com/)
-3. [Yieldlab](https://www.yieldlab.de/)
-4. [Ticketmaster](https://ticketmaster.com)
-5. [CyberAgent](https://www.cyberagent.co.jp/en/)
-6. [OpenSaaS Studio](https://opensaas.studio)
-7. [Riskified](https://www.riskified.com/)
+1. [KintoHub](https://www.kintohub.com/)
+1. [KompiTech GmbH](https://www.kompitech.com/)
+1. [Mirantis](https://mirantis.com/)
+1. [OpenSaaS Studio](https://opensaas.studio)
+1. [Optoro](https://www.optoro.com/)
+1. [Riskified](https://www.riskified.com/)
+1. [Tesla](https://tesla.com/)
+1. [tZERO](https://www.tzero.com/)
+1. [Ticketmaster](https://ticketmaster.com)
+1. [Yieldlab](https://www.yieldlab.de/)
+1. [Volvo Cars](https://www.volvocars.com/)
 
 ## Documentation
```
*(unnamed file)*

```diff
@@ -15,6 +15,7 @@ p, role:admin, applications, create, */*, allow
 p, role:admin, applications, update, */*, allow
 p, role:admin, applications, delete, */*, allow
 p, role:admin, applications, sync, */*, allow
+p, role:admin, applications, override, */*, allow
 p, role:admin, clusters, create, *, allow
 p, role:admin, clusters, update, *, allow
 p, role:admin, clusters, delete, *, allow
```
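The new `override` action can also be granted to custom roles through the `argocd-rbac-cm` ConfigMap; a minimal sketch, assuming the default `argocd` namespace, with the role name chosen here purely for illustration:

```bash
# Grant the new "override" action to a hypothetical role:deployer, using
# the same policy syntax as the builtin policy above.
kubectl -n argocd patch configmap argocd-rbac-cm --type merge \
  -p '{"data":{"policy.csv":"p, role:deployer, applications, override, */*, allow"}}'
```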
@@ -68,6 +68,11 @@
|
||||
},
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "resourceVersion",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -212,6 +217,11 @@
|
||||
},
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "resourceVersion",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -777,33 +787,6 @@
}
}
},
-"/api/v1/clusters-kubeconfig": {
-"post": {
-"tags": [
-"ClusterService"
-],
-"summary": "CreateFromKubeConfig installs the argocd-manager service account into the cluster specified in the given kubeconfig and context",
-"operationId": "CreateFromKubeConfig",
-"parameters": [
-{
-"name": "body",
-"in": "body",
-"required": true,
-"schema": {
-"$ref": "#/definitions/clusterClusterCreateFromKubeConfigRequest"
-}
-}
-],
-"responses": {
-"200": {
-"description": "(empty)",
-"schema": {
-"$ref": "#/definitions/v1alpha1Cluster"
-}
-}
-}
-}
-},
"/api/v1/clusters/{cluster.server}": {
|
||||
"put": {
|
||||
"tags": [
|
||||
@@ -885,6 +868,31 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/clusters/{server}/rotate-auth": {
|
||||
"post": {
|
||||
"tags": [
|
||||
"ClusterService"
|
||||
],
|
||||
"summary": "RotateAuth returns a cluster by server address",
|
||||
"operationId": "RotateAuth",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "server",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "(empty)",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/clusterClusterResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/projects": {
|
||||
"get": {
|
||||
"tags": [
|
||||
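The new endpoint takes the cluster's API server URL as a path parameter and returns a `clusterClusterResponse`. A hedged sketch of calling it directly over HTTP follows; the base URL and bearer-token handling are illustrative assumptions, not the project's CLI:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
)

// rotateClusterAuth POSTs to /api/v1/clusters/{server}/rotate-auth.
// The server address is a path parameter, so it must be URL-escaped.
func rotateClusterAuth(baseURL, clusterServer, token string) error {
	endpoint := fmt.Sprintf("%s/api/v1/clusters/%s/rotate-auth",
		baseURL, url.PathEscape(clusterServer))
	req, err := http.NewRequest(http.MethodPost, endpoint, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token) // placeholder auth scheme
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("rotate-auth failed: %s", resp.Status)
	}
	return nil
}

func main() {
	err := rotateClusterAuth("https://argocd.example.com", "https://1.2.3.4", "<session-token>")
	if err != nil {
		log.Fatal(err)
	}
}
```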
@@ -1387,6 +1395,11 @@
},
"name": "project",
"in": "query"
},
+{
+"type": "string",
+"name": "resourceVersion",
+"in": "query"
+}
],
"responses": {
@@ -1483,6 +1496,12 @@
"type": "boolean",
"format": "boolean"
},
+"manifests": {
+"type": "array",
+"items": {
+"type": "string"
+}
+},
"name": {
"type": "string"
},
@@ -1540,25 +1559,6 @@
}
}
},
-"clusterClusterCreateFromKubeConfigRequest": {
-"type": "object",
-"properties": {
-"context": {
-"type": "string"
-},
-"inCluster": {
-"type": "boolean",
-"format": "boolean"
-},
-"kubeconfig": {
-"type": "string"
-},
-"upsert": {
-"type": "boolean",
-"format": "boolean"
-}
-}
-},
"clusterClusterResponse": {
"type": "object"
},
@@ -1598,6 +1598,12 @@
},
"name": {
"type": "string"
},
+"scopes": {
+"type": "array",
+"items": {
+"type": "string"
+}
+}
}
},
@@ -2002,6 +2008,19 @@
}
}
},
+"v1Fields": {
+"type": "object",
+"title": "Fields stores a set of fields in a data structure like a Trie.\nTo understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff",
+"properties": {
+"map": {
+"description": "Map stores a set of fields in a data structure like a Trie.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. The string will follow one of these four formats:\n'f:<name>', where <name> is the name of a field in a struct, or key in a map\n'v:<value>', where <value> is the exact json formatted value of a list item\n'i:<index>', where <index> is position of a item in a list\n'k:<keys>', where <keys> is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal",
+"type": "object",
+"additionalProperties": {
+"$ref": "#/definitions/v1Fields"
+}
+}
+}
+},
"v1GroupKind": {
"description": "+protobuf.options.(gogoproto.goproto_stringer)=false",
"type": "object",
@@ -2073,6 +2092,30 @@
}
}
},
+"v1ManagedFieldsEntry": {
+"description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to.",
+"type": "object",
+"properties": {
+"apiVersion": {
+"description": "APIVersion defines the version of this resource that this field set\napplies to. The format is \"group/version\" just like the top-level\nAPIVersion field. It is necessary to track the version of a field\nset because it cannot be automatically converted.",
+"type": "string"
+},
+"fields": {
+"$ref": "#/definitions/v1Fields"
+},
+"manager": {
+"description": "Manager is an identifier of the workflow managing these fields.",
+"type": "string"
+},
+"operation": {
+"description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created.\nThe only valid values for this field are 'Apply' and 'Update'.",
+"type": "string"
+},
+"time": {
+"$ref": "#/definitions/v1Time"
+}
+}
+},
"v1MicroTime": {
"description": "MicroTime is version of Time with microsecond level precision.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false",
"type": "object",
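Both definitions mirror the upstream Kubernetes `metav1` types that back server-side apply bookkeeping. A rough Go rendering of the two schemas, assuming plain JSON tags and a standard `time.Time` in place of `v1Time`, purely for orientation:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Fields is the recursive trie from the v1Fields definition: keys are
// "." (the field itself), "f:<name>", "v:<value>", "i:<index>" or
// "k:<keys>", each mapping to a nested Fields value.
type Fields struct {
	Map map[string]Fields `json:"map,omitempty"`
}

// ManagedFieldsEntry mirrors v1ManagedFieldsEntry: which workflow
// ("manager") last touched which field set, at which API version.
type ManagedFieldsEntry struct {
	APIVersion string     `json:"apiVersion,omitempty"`
	Fields     *Fields    `json:"fields,omitempty"`
	Manager    string     `json:"manager,omitempty"`
	Operation  string     `json:"operation,omitempty"` // "Apply" or "Update"
	Time       *time.Time `json:"time,omitempty"`
}

func main() {
	// The set {metadata.labels} expressed as a trie: f:metadata -> f:labels -> "."
	entry := ManagedFieldsEntry{
		APIVersion: "argoproj.io/v1alpha1",
		Manager:    "argocd-application-controller", // hypothetical manager name
		Operation:  "Update",
		Fields: &Fields{Map: map[string]Fields{
			"f:metadata": {Map: map[string]Fields{
				"f:labels": {Map: map[string]Fields{".": {}}},
			}},
		}},
	}
	out, _ := json.MarshalIndent(entry, "", "  ")
	fmt.Println(string(out))
}
```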
@@ -2141,6 +2184,13 @@
"type": "string"
}
},
+"managedFields": {
+"description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.\n\n+optional",
+"type": "array",
+"items": {
+"$ref": "#/definitions/v1ManagedFieldsEntry"
+}
+},
"name": {
"type": "string",
"title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional"
@@ -2205,7 +2255,7 @@
}
},
"v1OwnerReference": {
-"description": "OwnerReference contains enough information to let you identify an owning\nobject. Currently, an owning object must be in the same namespace, so there\nis no namespace field.",
+"description": "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field.",
"type": "object",
"properties": {
"apiVersion": {
@@ -2536,6 +2586,10 @@
"$ref": "#/definitions/v1alpha1HelmParameter"
}
},
+"releaseName": {
+"type": "string",
+"title": "The Helm release name. If omitted it will use the application name"
+},
"valueFiles": {
"type": "array",
"title": "ValuesFiles is a list of Helm value files to use when generating a template",
@@ -2586,6 +2640,13 @@
"type": "object",
"title": "ApplicationSourceKustomize holds kustomize specific options",
"properties": {
+"commonLabels": {
+"type": "object",
+"title": "CommonLabels adds additional kustomize commonLabels",
+"additionalProperties": {
+"type": "string"
+}
+},
"imageTags": {
"type": "array",
"title": "ImageTags are kustomize 1.0 image tag overrides",
@@ -2629,6 +2690,13 @@
"$ref": "#/definitions/v1alpha1ResourceIgnoreDifferences"
}
},
+"info": {
+"type": "array",
+"title": "Infos contains a list of useful information (URLs, email addresses, and plain text) that relates to the application",
+"items": {
+"$ref": "#/definitions/v1alpha1Info"
+}
+},
"project": {
"description": "Project is a application project name. Empty name means that application belongs to 'default' project.",
"type": "string"
@@ -2651,13 +2719,6 @@
"$ref": "#/definitions/v1alpha1ApplicationCondition"
}
},
-"externalURLs": {
-"description": "ExternalURLs holds all external URLs of application child resources.",
-"type": "array",
-"items": {
-"type": "string"
-}
-},
"health": {
"$ref": "#/definitions/v1alpha1HealthStatus"
},
@@ -2685,11 +2746,33 @@
"sourceType": {
"type": "string"
},
+"summary": {
+"$ref": "#/definitions/v1alpha1ApplicationSummary"
+},
"sync": {
"$ref": "#/definitions/v1alpha1SyncStatus"
}
}
},
+"v1alpha1ApplicationSummary": {
+"type": "object",
+"properties": {
+"externalURLs": {
+"description": "ExternalURLs holds all external URLs of application child resources.",
+"type": "array",
+"items": {
+"type": "string"
+}
+},
+"images": {
+"description": "Images holds all images of application child resources.",
+"type": "array",
+"items": {
+"type": "string"
+}
+}
+}
+},
"v1alpha1ApplicationTree": {
"type": "object",
"title": "ApplicationTree holds nodes which belongs to the application",
@@ -2824,6 +2907,17 @@
}
}
},
+"v1alpha1Info": {
+"type": "object",
+"properties": {
+"name": {
+"type": "string"
+},
+"value": {
+"type": "string"
+}
+}
+},
"v1alpha1InfoItem": {
"type": "object",
"title": "InfoItem contains human readable information about object",
@@ -3193,6 +3287,9 @@
"namespace": {
"type": "string"
},
+"uid": {
+"type": "string"
+},
"version": {
"type": "string"
}
@@ -3206,16 +3303,19 @@
"type": "string"
},
"hookPhase": {
-"type": "string"
+"type": "string",
+"title": "the state of any operation associated with this resource OR hook\nnote: can contain values for non-hook resources"
},
"hookType": {
-"type": "string"
+"type": "string",
+"title": "the type of the hook, empty for non-hook resources"
},
"kind": {
"type": "string"
},
"message": {
-"type": "string"
+"type": "string",
+"title": "message for the last sync OR operation"
},
"name": {
"type": "string"
@@ -3224,7 +3324,12 @@
"type": "string"
},
"status": {
-"type": "string"
+"type": "string",
+"title": "the final result of the sync, this is be empty if the resources is yet to be applied/pruned and is always zero-value for hooks"
},
+"syncPhase": {
+"type": "string",
+"title": "indicates the particular phase of the sync that this is for"
+},
"version": {
"type": "string"
@@ -3290,6 +3395,13 @@
"format": "boolean",
"title": "DryRun will perform a `kubectl apply --dry-run` without actually performing the sync"
},
+"manifests": {
+"type": "array",
+"title": "Manifests is an optional field that overrides sync source with a local directory for development",
+"items": {
+"type": "string"
+}
+},
"prune": {
"type": "boolean",
"format": "boolean",
@@ -16,7 +16,6 @@ import (
// load the oidc plugin (required to authenticate with OpenID Connect).
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"

-argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/controller"
"github.com/argoproj/argo-cd/errors"
@@ -37,14 +36,16 @@ const (

func newCommand() *cobra.Command {
var (
-clientConfig clientcmd.ClientConfig
-appResyncPeriod int64
-repoServerAddress string
-statusProcessors int
-operationProcessors int
-logLevel string
-glogLevel int
-cacheSrc func() (*cache.Cache, error)
+clientConfig clientcmd.ClientConfig
+appResyncPeriod int64
+repoServerAddress string
+repoServerTimeoutSeconds int
+statusProcessors int
+operationProcessors int
+logLevel string
+glogLevel int
+metricsPort int
+cacheSrc func() (*cache.Cache, error)
)
var command = cobra.Command{
Use: cliName,
@@ -54,9 +55,9 @@ func newCommand() *cobra.Command {
cli.SetGLogLevel(glogLevel)

config, err := clientConfig.ClientConfig()
-errors.CheckError(err)
+config.QPS = common.K8sClientConfigQPS
+config.Burst = common.K8sClientConfigBurst
+errors.CheckError(err)

kubeClient := kubernetes.NewForConfigOrDie(config)
appClient := appclientset.NewForConfigOrDie(config)
@@ -65,7 +66,7 @@ func newCommand() *cobra.Command {
errors.CheckError(err)

resyncDuration := time.Duration(appResyncPeriod) * time.Second
-repoClientset := reposerver.NewRepoServerClientset(repoServerAddress)
+repoClientset := reposerver.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

@@ -80,10 +81,11 @@ func newCommand() *cobra.Command {
appClient,
repoClientset,
cache,
-resyncDuration)
+resyncDuration,
+metricsPort)
errors.CheckError(err)

-log.Infof("Application Controller (version: %s) starting (namespace: %s)", argocd.GetVersion(), namespace)
+log.Infof("Application Controller (version: %s) starting (namespace: %s)", common.GetVersion(), namespace)
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
@@ -98,10 +100,12 @@ func newCommand() *cobra.Command {
clientConfig = cli.AddKubectlFlagsToCmd(&command)
command.Flags().Int64Var(&appResyncPeriod, "app-resync", defaultAppResyncPeriod, "Time period in seconds for application resync.")
command.Flags().StringVar(&repoServerAddress, "repo-server", common.DefaultRepoServerAddr, "Repo server address.")
+command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", 60, "Repo server RPC call timeout seconds.")
command.Flags().IntVar(&statusProcessors, "status-processors", 1, "Number of application status processors")
command.Flags().IntVar(&operationProcessors, "operation-processors", 1, "Number of application operation processors")
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
+command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDMetrics, "Start metrics server on given port")
cacheSrc = cache.AddCacheFlagsToCmd(&command)
return &command
}
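The new `--repo-server-timeout-seconds` flag is threaded into the repo-server clientset. As a sketch of the general mechanism (not the project's exact wiring), a per-call budget like this is usually enforced by deriving a context with a deadline and handing it to the RPC:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// callWithTimeout runs rpc under an N-second deadline, the way a
// --repo-server-timeout-seconds value would bound each repo-server call.
func callWithTimeout(timeoutSeconds int, rpc func(ctx context.Context) error) error {
	ctx, cancel := context.WithTimeout(context.Background(),
		time.Duration(timeoutSeconds)*time.Second)
	defer cancel()
	return rpc(ctx)
}

func main() {
	err := callWithTimeout(1, func(ctx context.Context) error {
		select {
		case <-time.After(2 * time.Second): // simulated slow RPC
			return nil
		case <-ctx.Done():
			return ctx.Err() // context deadline exceeded
		}
	})
	fmt.Println("result:", err)
}
```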
@@ -7,11 +7,11 @@ import (
"os"
"time"

-"github.com/prometheus/client_golang/prometheus/promhttp"
+"github.com/argoproj/argo-cd/reposerver/metrics"

log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"

-argocd "github.com/argoproj/argo-cd"
"github.com/argoproj/argo-cd/common"
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/reposerver"
@@ -31,6 +31,8 @@ func newCommand() *cobra.Command {
var (
logLevel string
parallelismLimit int64
+listenPort int
+metricsPort int
cacheSrc func() (*cache.Cache, error)
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
)
@@ -46,16 +48,18 @@ func newCommand() *cobra.Command {
cache, err := cacheSrc()
errors.CheckError(err)

-server, err := reposerver.NewServer(git.NewFactory(), cache, tlsConfigCustomizer, parallelismLimit)
+metricsServer := metrics.NewMetricsServer(git.NewFactory())
+server, err := reposerver.NewServer(metricsServer, cache, tlsConfigCustomizer, parallelismLimit)
errors.CheckError(err)

grpc := server.CreateGRPC()
-listener, err := net.Listen("tcp", fmt.Sprintf(":%d", common.PortRepoServer))
+listener, err := net.Listen("tcp", fmt.Sprintf(":%d", listenPort))
errors.CheckError(err)

-http.Handle("/metrics", promhttp.Handler())
-go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf(":%d", common.PortRepoServerMetrics), nil)) }()
+http.Handle("/metrics", metricsServer.GetHandler())
+go func() { errors.CheckError(http.ListenAndServe(fmt.Sprintf(":%d", metricsPort), nil)) }()

-log.Infof("argocd-repo-server %s serving on %s", argocd.GetVersion(), listener.Addr())
+log.Infof("argocd-repo-server %s serving on %s", common.GetVersion(), listener.Addr())
stats.RegisterStackDumper()
stats.StartStatsTicker(10 * time.Minute)
stats.RegisterHeapDumper("memprofile")
@@ -67,6 +71,8 @@ func newCommand() *cobra.Command {

command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.Flags().Int64Var(&parallelismLimit, "parallelismlimit", 0, "Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit.")
+command.Flags().IntVar(&listenPort, "port", common.DefaultPortRepoServer, "Listen on given port for incoming connections")
+command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortRepoServerMetrics, "Start metrics server on given port")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
cacheSrc = cache.AddCacheFlagsToCmd(&command)
return &command
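The change swaps the bare `promhttp.Handler()` for a handler owned by a dedicated metrics server type. A generic sketch of that pattern with prometheus/client_golang (the metric name and port below are made up, not the repo server's real ones):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Using a private registry instead of the global default mirrors
	// the move to a self-contained metrics server.
	reg := prometheus.NewRegistry()
	gitRequests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_git_requests_total", // hypothetical metric
		Help: "Number of git requests served.",
	})
	reg.MustRegister(gitRequests)
	gitRequests.Inc()

	metricsPort := 8084 // illustrative; the real default lives in the common package
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", metricsPort), nil))
}
```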
@@ -22,17 +22,20 @@ import (

// NewCommand returns a new instance of an argocd command
func NewCommand() *cobra.Command {
var (
-insecure bool
-logLevel string
-glogLevel int
-clientConfig clientcmd.ClientConfig
-staticAssetsDir string
-baseHRef string
-repoServerAddress string
-dexServerAddress string
-disableAuth bool
-tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
-cacheSrc func() (*cache.Cache, error)
+insecure bool
+listenPort int
+metricsPort int
+logLevel string
+glogLevel int
+clientConfig clientcmd.ClientConfig
+repoServerTimeoutSeconds int
+staticAssetsDir string
+baseHRef string
+repoServerAddress string
+dexServerAddress string
+disableAuth bool
+tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
+cacheSrc func() (*cache.Cache, error)
)
var command = &cobra.Command{
Use: cliName,
@@ -57,10 +60,12 @@ func NewCommand() *cobra.Command {

kubeclientset := kubernetes.NewForConfigOrDie(config)
appclientset := appclientset.NewForConfigOrDie(config)
-repoclientset := reposerver.NewRepoServerClientset(repoServerAddress)
+repoclientset := reposerver.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds)

argoCDOpts := server.ArgoCDServerOpts{
Insecure: insecure,
+ListenPort: listenPort,
+MetricsPort: metricsPort,
Namespace: namespace,
StaticAssetsDir: staticAssetsDir,
BaseHRef: baseHRef,
@@ -81,7 +86,7 @@ func NewCommand() *cobra.Command {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
argocd := server.NewServer(ctx, argoCDOpts)
-argocd.Run(ctx, common.PortAPIServer)
+argocd.Run(ctx, listenPort, metricsPort)
cancel()
}
},
@@ -97,6 +102,9 @@ func NewCommand() *cobra.Command {
command.Flags().StringVar(&dexServerAddress, "dex-server", common.DefaultDexServerAddr, "Dex server address")
command.Flags().BoolVar(&disableAuth, "disable-auth", false, "Disable client authentication")
command.AddCommand(cli.NewVersionCmd(cliName))
+command.Flags().IntVar(&listenPort, "port", common.DefaultPortAPIServer, "Listen on given port")
+command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDAPIServerMetrics, "Start metrics on given port")
+command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", 60, "Repo server RPC call timeout seconds.")
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
cacheSrc = cache.AddCacheFlagsToCmd(command)
return command
@@ -11,7 +11,7 @@ import (

"github.com/argoproj/argo-cd/errors"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
-"github.com/argoproj/argo-cd/server/account"
+accountpkg "github.com/argoproj/argo-cd/pkg/apiclient/account"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/cli"
"github.com/argoproj/argo-cd/util/localconfig"
@@ -57,7 +57,7 @@ func NewAccountUpdatePasswordCommand(clientOpts *argocdclient.ClientOptions) *co
errors.CheckError(err)
}

-updatePasswordRequest := account.UpdatePasswordRequest{
+updatePasswordRequest := accountpkg.UpdatePasswordRequest{
NewPassword: newPassword,
CurrentPassword: currentPassword,
}
@@ -11,6 +11,8 @@ import (
"os/exec"
"path"
+"reflect"
+"regexp"
"sort"
"strconv"
"strings"
"text/tabwriter"
@@ -31,10 +33,10 @@ import (
"github.com/argoproj/argo-cd/errors"
"github.com/argoproj/argo-cd/pkg/apiclient"
argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+applicationpkg "github.com/argoproj/argo-cd/pkg/apiclient/application"
+settingspkg "github.com/argoproj/argo-cd/pkg/apiclient/settings"
argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/reposerver/repository"
-"github.com/argoproj/argo-cd/server/application"
-"github.com/argoproj/argo-cd/server/settings"
"github.com/argoproj/argo-cd/util"
"github.com/argoproj/argo-cd/util/argo"
"github.com/argoproj/argo-cd/util/cli"
@@ -43,6 +45,7 @@ import (
"github.com/argoproj/argo-cd/util/git"
"github.com/argoproj/argo-cd/util/hook"
"github.com/argoproj/argo-cd/util/kube"
+"github.com/argoproj/argo-cd/util/resource"
"github.com/argoproj/argo-cd/util/templates"
)
@@ -138,7 +141,7 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
}
conn, appIf := argocdClient.NewApplicationClientOrDie()
defer util.Close(conn)
-appCreateRequest := application.ApplicationCreateRequest{
+appCreateRequest := applicationpkg.ApplicationCreateRequest{
Application: app,
Upsert: &upsert,
}
@@ -189,7 +192,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
conn, appIf := acdClient.NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
-app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: getRefreshType(refresh, hardRefresh)})
+app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: getRefreshType(refresh, hardRefresh)})
errors.CheckError(err)
switch output {
case "yaml":
@@ -221,7 +224,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
if len(app.Status.Resources) > 0 {
fmt.Println()
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
-printAppResources(w, app, showOperation)
+printAppResources(w, app)
_ = w.Flush()
}
default:
@@ -322,7 +325,7 @@ func truncateString(str string, num int) string {
}

// printParams prints parameters and overrides
-func printParams(app *argoappv1.Application, appIf application.ApplicationServiceClient) {
+func printParams(app *argoappv1.Application, appIf applicationpkg.ApplicationServiceClient) {
paramLenLimit := 80
fmt.Println()
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
@@ -360,7 +363,7 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
argocdClient := argocdclient.NewClientOrDie(clientOpts)
conn, appIf := argocdClient.NewApplicationClientOrDie()
defer util.Close(conn)
-app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: &appName})
+app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName})
errors.CheckError(err)
visited := setAppOptions(c.Flags(), app, &appOpts)
if visited == 0 {
@@ -369,7 +372,7 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
os.Exit(1)
}
setParameterOverrides(app, appOpts.parameters)
-_, err = appIf.UpdateSpec(ctx, &application.ApplicationUpdateSpecRequest{
+_, err = appIf.UpdateSpec(ctx, &applicationpkg.ApplicationUpdateSpecRequest{
Name: &app.Name,
Spec: app.Spec,
})
@@ -394,7 +397,9 @@ func setAppOptions(flags *pflag.FlagSet, app *argoappv1.Application, appOpts *ap
case "revision":
app.Spec.Source.TargetRevision = appOpts.revision
case "values":
-setHelmOpt(&app.Spec.Source, appOpts.valuesFiles)
+setHelmOpt(&app.Spec.Source, appOpts.valuesFiles, nil)
+case "release-name":
+setHelmOpt(&app.Spec.Source, nil, &appOpts.releaseName)
case "directory-recurse":
app.Spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse}
case "config-management-plugin":
@@ -454,13 +459,16 @@ func setKustomizeOpt(src *argoappv1.ApplicationSource, namePrefix *string) {
}
}

-func setHelmOpt(src *argoappv1.ApplicationSource, valueFiles []string) {
+func setHelmOpt(src *argoappv1.ApplicationSource, valueFiles []string, releaseName *string) {
if src.Helm == nil {
src.Helm = &argoappv1.ApplicationSourceHelm{}
}
if valueFiles != nil {
src.Helm.ValueFiles = valueFiles
}
+if releaseName != nil {
+src.Helm.ReleaseName = *releaseName
+}
if src.Helm.IsZero() {
src.Helm = nil
}
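`setHelmOpt` now takes the value files and the release name as independent, optional arguments, and still drops an empty Helm block entirely. A self-contained demonstration with local stand-in types (the real `ApplicationSource` lives in pkg/apis/application/v1alpha1):

```go
package main

import "fmt"

// Stand-ins for the argoappv1 types, just enough to run the example.
type ApplicationSourceHelm struct {
	ValueFiles  []string
	ReleaseName string
}

func (h *ApplicationSourceHelm) IsZero() bool {
	return h == nil || (len(h.ValueFiles) == 0 && h.ReleaseName == "")
}

type ApplicationSource struct {
	Helm *ApplicationSourceHelm
}

// Same shape as the patched setHelmOpt above: each argument is optional,
// and a Helm block that ends up empty is removed.
func setHelmOpt(src *ApplicationSource, valueFiles []string, releaseName *string) {
	if src.Helm == nil {
		src.Helm = &ApplicationSourceHelm{}
	}
	if valueFiles != nil {
		src.Helm.ValueFiles = valueFiles
	}
	if releaseName != nil {
		src.Helm.ReleaseName = *releaseName
	}
	if src.Helm.IsZero() {
		src.Helm = nil
	}
}

func main() {
	var src ApplicationSource
	setHelmOpt(&src, []string{"values-production.yaml"}, nil)
	name := "guestbook-prod" // illustrative release name
	setHelmOpt(&src, nil, &name)
	fmt.Printf("%+v\n", *src.Helm) // {ValueFiles:[values-production.yaml] ReleaseName:guestbook-prod}
}
```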
@@ -475,6 +483,7 @@ type appOptions struct {
destNamespace string
parameters []string
valuesFiles []string
+releaseName string
project string
syncPolicy string
autoPrune bool
@@ -492,6 +501,7 @@ func addAppFlags(command *cobra.Command, opts *appOptions) {
command.Flags().StringVar(&opts.destNamespace, "dest-namespace", "", "K8s target namespace (overrides the namespace specified in the ksonnet app.yaml)")
command.Flags().StringArrayVarP(&opts.parameters, "parameter", "p", []string{}, "set a parameter override (e.g. -p guestbook=image=example/guestbook:latest)")
command.Flags().StringArrayVar(&opts.valuesFiles, "values", []string{}, "Helm values file(s) to use")
+command.Flags().StringVar(&opts.releaseName, "release-name", "", "Helm release-name")
command.Flags().StringVar(&opts.project, "project", "", "Application project name")
command.Flags().StringVar(&opts.syncPolicy, "sync-policy", "", "Set the sync policy (one of: automated, none)")
command.Flags().BoolVar(&opts.autoPrune, "auto-prune", false, "Set automatic pruning when sync is automated")
@@ -517,7 +527,7 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
appName := args[0]
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
-app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName})
+app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName})
errors.CheckError(err)

updated := false
@@ -558,13 +568,13 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
}
}
}
-setHelmOpt(&app.Spec.Source, specValueFiles)
+setHelmOpt(&app.Spec.Source, specValueFiles, nil)
if !updated {
return
}
}

-_, err = appIf.UpdateSpec(context.Background(), &application.ApplicationUpdateSpecRequest{
+_, err = appIf.UpdateSpec(context.Background(), &applicationpkg.ApplicationUpdateSpecRequest{
Name: &app.Name,
Spec: app.Spec,
})
@@ -603,6 +613,18 @@ func liveObjects(resources []*argoappv1.ResourceDiff) ([]*unstructured.Unstructu
}

+func getLocalObjects(app *argoappv1.Application, local string, appLabelKey string) []*unstructured.Unstructured {
+manifestStrings := getLocalObjectsString(app, local, appLabelKey)
+objs := make([]*unstructured.Unstructured, len(manifestStrings))
+for i := range manifestStrings {
+obj := unstructured.Unstructured{}
+err := json.Unmarshal([]byte(manifestStrings[i]), &obj)
+errors.CheckError(err)
+objs[i] = &obj
+}
+return objs
+}
+
+func getLocalObjectsString(app *argoappv1.Application, local string, appLabelKey string) []string {
res, err := repository.GenerateManifests(local, &repository.ManifestRequest{
ApplicationSource: &app.Spec.Source,
AppLabelKey: appLabelKey,
@@ -610,14 +632,8 @@ func getLocalObjects(app *argoappv1.Application, local string, appLabelKey strin
Namespace: app.Spec.Destination.Namespace,
})
errors.CheckError(err)
-objs := make([]*unstructured.Unstructured, len(res.Manifests))
-for i := range res.Manifests {
-obj := unstructured.Unstructured{}
-err = json.Unmarshal([]byte(res.Manifests[i]), &obj)
-errors.CheckError(err)
-objs[i] = &obj
-}
-return objs
+return res.Manifests
}

type resourceInfoProvider struct {
@@ -644,7 +660,7 @@ func groupLocalObjs(localObs []*unstructured.Unstructured, liveObjs []*unstructu
objByKey := make(map[kube.ResourceKey]*unstructured.Unstructured)
for i := range localObs {
obj := localObs[i]
-if !hook.IsHook(obj) {
+if !(hook.IsHook(obj) || resource.Ignore(obj)) {
objByKey[kube.GetResourceKey(obj)] = obj
}
}
@@ -673,9 +689,9 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
conn, appIf := clientset.NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
-app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: getRefreshType(refresh, hardRefresh)})
+app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: getRefreshType(refresh, hardRefresh)})
errors.CheckError(err)
-resources, err := appIf.ManagedResources(context.Background(), &application.ResourcesQuery{ApplicationName: &appName})
+resources, err := appIf.ManagedResources(context.Background(), &applicationpkg.ResourcesQuery{ApplicationName: &appName})
errors.CheckError(err)
liveObjs, err := liveObjects(resources.Items)
errors.CheckError(err)
@@ -687,7 +703,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co

conn, settingsIf := clientset.NewSettingsClientOrDie()
defer util.Close(conn)
-argoSettings, err := settingsIf.Get(context.Background(), &settings.SettingsQuery{})
+argoSettings, err := settingsIf.Get(context.Background(), &settingspkg.SettingsQuery{})
errors.CheckError(err)

if local != "" {
@@ -857,7 +873,7 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
for _, appName := range args {
-appDeleteReq := application.ApplicationDeleteRequest{
+appDeleteReq := applicationpkg.ApplicationDeleteRequest{
Name: &appName,
}
if c.Flag("cascade").Changed {
@@ -883,7 +899,7 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
Run: func(c *cobra.Command, args []string) {
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
-apps, err := appIf.List(context.Background(), &application.ApplicationQuery{})
+apps, err := appIf.List(context.Background(), &applicationpkg.ApplicationQuery{})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
var fmtStr string
@@ -957,6 +973,7 @@ func formatConditionsSummary(app argoappv1.Application) string {
const (
resourceFieldDelimiter = ":"
resourceFieldCount = 3
+labelFieldDelimiter = "="
)

func parseSelectedResources(resources []string) []argoappv1.SyncOperationResource {
@@ -979,6 +996,21 @@ func parseSelectedResources(resources []string) []argoappv1.SyncOperationResourc
return selectedResources
}

+func parseLabels(labels []string) (map[string]string, error) {
+var selectedLabels map[string]string
+if labels != nil {
+selectedLabels = map[string]string{}
+for _, r := range labels {
+fields := strings.Split(r, labelFieldDelimiter)
+if len(fields) != 2 {
+return nil, fmt.Errorf("labels should have key%svalue, but instead got: %s", labelFieldDelimiter, r)
+}
+selectedLabels[fields[0]] = fields[1]
+}
+}
+return selectedLabels, nil
+}
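The helper is strict: every `--label` argument must contain exactly one `=`. Reproduced below verbatim with a small driver so the behavior is runnable in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

const labelFieldDelimiter = "="

// parseLabels is the function added above: "key=value" strings become
// a map; anything with zero or more than one delimiter is rejected.
func parseLabels(labels []string) (map[string]string, error) {
	var selectedLabels map[string]string
	if labels != nil {
		selectedLabels = map[string]string{}
		for _, r := range labels {
			fields := strings.Split(r, labelFieldDelimiter)
			if len(fields) != 2 {
				return nil, fmt.Errorf("labels should have key%svalue, but instead got: %s", labelFieldDelimiter, r)
			}
			selectedLabels[fields[0]] = fields[1]
		}
	}
	return selectedLabels, nil
}

func main() {
	m, err := parseLabels([]string{"app=guestbook", "tier=frontend"})
	fmt.Println(m, err) // map[app:guestbook tier:frontend] <nil>

	_, err = parseLabels([]string{"not-a-label"})
	fmt.Println(err) // labels should have key=value, but instead got: not-a-label
}
```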
// NewApplicationWaitCommand returns a new instance of an `argocd app wait` command
func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
var (
@@ -1020,51 +1052,10 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
}

// printAppResources prints the resources of an application in a tabwriter table
-// Optionally prints the message from the operation state
-func printAppResources(w io.Writer, app *argoappv1.Application, showOperation bool) {
-messages := make(map[string]string)
-opState := app.Status.OperationState
-var syncRes *argoappv1.SyncOperationResult
-
-if showOperation {
-fmt.Fprintf(w, "GROUP\tKIND\tNAMESPACE\tNAME\tSTATUS\tHEALTH\tHOOK\tMESSAGE\n")
-if opState != nil {
-if opState.SyncResult != nil {
-syncRes = opState.SyncResult
-}
-}
-if syncRes != nil {
-for _, res := range syncRes.Resources {
-if !res.IsHook() {
-messages[fmt.Sprintf("%s/%s/%s/%s", res.Group, res.Kind, res.Namespace, res.Name)] = res.Message
-} else if res.HookType == argoappv1.HookTypePreSync {
-fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", res.Group, res.Kind, res.Namespace, res.Name, res.HookPhase, "", res.HookType, res.Message)
-}
-}
-}
-} else {
-fmt.Fprintf(w, "GROUP\tKIND\tNAMESPACE\tNAME\tSTATUS\tHEALTH\n")
-}
-for _, res := range app.Status.Resources {
-healthStatus := ""
-if res.Health != nil {
-healthStatus = res.Health.Status
-}
-if showOperation {
-message := messages[fmt.Sprintf("%s/%s/%s/%s", res.Group, res.Kind, res.Namespace, res.Name)]
-fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s", res.Group, res.Kind, res.Namespace, res.Name, res.Status, healthStatus, "", message)
-} else {
-fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s", res.Group, res.Kind, res.Namespace, res.Name, res.Status, healthStatus)
-}
-fmt.Fprint(w, "\n")
-}
-if showOperation && syncRes != nil {
-for _, res := range syncRes.Resources {
-if res.HookType == argoappv1.HookTypeSync || res.HookType == argoappv1.HookTypePostSync {
-fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", res.Group, res.Kind, res.Namespace, res.Name, res.HookPhase, "", res.HookType, res.Message)
-}
-}
-
+func printAppResources(w io.Writer, app *argoappv1.Application) {
+_, _ = fmt.Fprintf(w, "GROUP\tKIND\tNAMESPACE\tNAME\tSTATUS\tHEALTH\tHOOK\tMESSAGE\n")
+for _, res := range getResourceStates(app, nil) {
+_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", res.Group, res.Kind, res.Namespace, res.Name, res.Status, res.Health, res.Hook, res.Message)
+}
+}
@@ -1073,11 +1064,14 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
var (
revision string
resources []string
+labels []string
prune bool
dryRun bool
timeout uint
strategy string
force bool
+async bool
+local string
)
var command = &cobra.Command{
Use: "sync APPNAME",
@@ -1091,14 +1085,76 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
conn, appIf := acdClient.NewApplicationClientOrDie()
defer util.Close(conn)

-selectedResources := parseSelectedResources(resources)
appName := args[0]
-syncReq := application.ApplicationSyncRequest{

+selectedLabels, parseErr := parseLabels(labels)
+if parseErr != nil {
+log.Fatal(parseErr)
+}
+
+if len(selectedLabels) > 0 {
+ctx := context.Background()
+
+if revision == "" {
+revision = "HEAD"
+}
+
+q := applicationpkg.ApplicationManifestQuery{
+Name: &appName,
+Revision: revision,
+}
+
+res, err := appIf.GetManifests(ctx, &q)
+if err != nil {
+log.Fatal(err)
+}
+
+for _, mfst := range res.Manifests {
+obj, err := argoappv1.UnmarshalToUnstructured(mfst)
+errors.CheckError(err)
+for key, selectedValue := range selectedLabels {
+if objectValue, ok := obj.GetLabels()[key]; ok && selectedValue == objectValue {
+gvk := obj.GroupVersionKind()
+resources = append(resources, fmt.Sprintf("%s:%s:%s", gvk.Group, gvk.Kind, obj.GetName()))
+}
+}
+}
+
+// If labels are provided and none are found return error only if specific resources were also not
+// specified.
+if len(resources) == 0 {
+log.Fatalf("No matching resources found for labels: %v", labels)
+return
+}
+}
+
+selectedResources := parseSelectedResources(resources)
+
+var localObjsStrings []string
+if local != "" {
+app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName})
+
+if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.Automated != nil {
+log.Fatal("Cannot use local sync when Automatic Sync Policy is enabled")
+}
+
+errors.CheckError(err)
+conn, settingsIf := acdClient.NewSettingsClientOrDie()
+argoSettings, err := settingsIf.Get(context.Background(), &settingspkg.SettingsQuery{})
+errors.CheckError(err)
+util.Close(conn)
+
+localObjsStrings = getLocalObjectsString(app, local, argoSettings.AppLabelKey)
+}
+
+syncReq := applicationpkg.ApplicationSyncRequest{
Name: &appName,
DryRun: dryRun,
Revision: revision,
Resources: selectedResources,
Prune: prune,
+Manifests: localObjsStrings,
}
switch strategy {
case "apply":
@@ -1114,31 +1170,34 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
_, err := appIf.Sync(ctx, &syncReq)
errors.CheckError(err)

-app, err := waitOnApplicationStatus(acdClient, appName, timeout, false, false, true, false, selectedResources)
-errors.CheckError(err)
+if !async {
+app, err := waitOnApplicationStatus(acdClient, appName, timeout, false, false, true, false, selectedResources)
+errors.CheckError(err)

-pruningRequired := 0
-for _, resDetails := range app.Status.OperationState.SyncResult.Resources {
-if resDetails.Status == argoappv1.ResultCodePruneSkipped {
-pruningRequired++
+// Only get resources to be pruned if sync was application-wide
+if len(selectedResources) == 0 {
+pruningRequired := app.Status.OperationState.SyncResult.Resources.PruningRequired()
+if pruningRequired > 0 {
+log.Fatalf("%d resources require pruning", pruningRequired)
+}
+
+if !app.Status.OperationState.Phase.Successful() && !dryRun {
+os.Exit(1)
+}
+}
+}
-if pruningRequired > 0 {
-log.Fatalf("%d resources require pruning", pruningRequired)
-}
-
-if !app.Status.OperationState.Phase.Successful() && !dryRun {
-os.Exit(1)
-}
},
}
command.Flags().BoolVar(&dryRun, "dry-run", false, "Preview apply without affecting cluster")
command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources")
command.Flags().StringVar(&revision, "revision", "", "Sync to a specific revision. Preserves parameter overrides")
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%sKIND%sNAME. Fields may be blank. This option may be specified repeatedly", resourceFieldDelimiter, resourceFieldDelimiter))
+command.Flags().StringArrayVar(&labels, "label", []string{}, fmt.Sprintf("Sync only specific resources with a label. This option may be specified repeatedly."))
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
command.Flags().StringVar(&strategy, "strategy", "", "Sync strategy (one of: apply|hook)")
command.Flags().BoolVar(&force, "force", false, "Use a force apply")
+command.Flags().BoolVar(&async, "async", false, "Do not wait for application to sync before continuing")
+command.Flags().StringVar(&local, "local", "", "Path to a local directory. When this flag is present no git queries will be made")
return command
}
@@ -1154,33 +1213,6 @@ type resourceState struct {
Message string
}

-func newResourceStateFromStatus(res *argoappv1.ResourceStatus) *resourceState {
-healthStatus := ""
-if res.Health != nil {
-healthStatus = res.Health.Status
-}
-return &resourceState{
-Group: res.Group,
-Kind: res.Kind,
-Namespace: res.Namespace,
-Name: res.Name,
-Status: string(res.Status),
-Health: healthStatus,
-}
-}
-
-func newResourceStateFromResult(res *argoappv1.ResourceResult) *resourceState {
-return &resourceState{
-Group: res.Group,
-Kind: res.Kind,
-Namespace: res.Namespace,
-Name: res.Name,
-Status: string(res.HookPhase),
-Hook: string(res.HookType),
-Message: res.Message,
-}
-}

// Key returns a unique-ish key for the resource.
func (rs *resourceState) Key() string {
return fmt.Sprintf("%s/%s/%s/%s", rs.Group, rs.Kind, rs.Namespace, rs.Name)
@@ -1209,44 +1241,69 @@ func (rs *resourceState) Merge(newState *resourceState) bool {
return updated
}

-func calculateResourceStates(app *argoappv1.Application, selectedResources []argoappv1.SyncOperationResource) map[string]*resourceState {
-resStates := getResourceStates(app, selectedResources)
+func getResourceStates(app *argoappv1.Application, selectedResources []argoappv1.SyncOperationResource) []*resourceState {
+var states []*resourceState
+resourceByKey := make(map[kube.ResourceKey]argoappv1.ResourceStatus)
+for i := range app.Status.Resources {
+res := app.Status.Resources[i]
+resourceByKey[kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name)] = res
+}

-var opResult *argoappv1.SyncOperationResult
-if app.Status.OperationState != nil {
-if app.Status.OperationState.SyncResult != nil {
-opResult = app.Status.OperationState.SyncResult
+// print most resources info along with most recent operation results
+if app.Status.OperationState != nil && app.Status.OperationState.SyncResult != nil {
+for _, res := range app.Status.OperationState.SyncResult.Resources {
+sync := string(res.HookPhase)
+health := string(res.Status)
+key := kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name)
+if resource, ok := resourceByKey[key]; ok && res.HookType == "" {
+health = argoappv1.HealthStatusUnknown
+if resource.Health != nil {
+health = resource.Health.Status
+}
+sync = string(resource.Status)
+}
+states = append(states, &resourceState{
+Group: res.Group, Kind: res.Kind, Namespace: res.Namespace, Name: res.Name, Status: sync, Health: health, Hook: string(res.HookType), Message: res.Message})
+delete(resourceByKey, kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name))
+}
}
-if opResult == nil {
-return resStates
+resKeys := make([]kube.ResourceKey, 0)
+for k := range resourceByKey {
+resKeys = append(resKeys, k)
}

-for _, result := range opResult.Resources {
-newState := newResourceStateFromResult(result)
-key := newState.Key()
-if prev, ok := resStates[key]; ok {
-prev.Merge(newState)
-} else {
-resStates[key] = newState
+sort.Slice(resKeys, func(i, j int) bool {
+return resKeys[i].String() < resKeys[j].String()
+})
+// print rest of resources which were not part of most recent operation
+for _, resKey := range resKeys {
+res := resourceByKey[resKey]
+health := argoappv1.HealthStatusUnknown
+if res.Health != nil {
+health = res.Health.Status
+}
+states = append(states, &resourceState{
+Group: res.Group, Kind: res.Kind, Namespace: res.Namespace, Name: res.Name, Status: string(res.Status), Health: health, Hook: "", Message: ""})
+}
+// filter out not selected resources
+if len(selectedResources) > 0 {
+for i := len(states) - 1; i >= 0; i-- {
+res := states[i]
+if !argo.ContainsSyncResource(res.Name, schema.GroupVersionKind{Group: res.Group, Kind: res.Kind}, selectedResources) {
+states = append(states[:i], states[i+1:]...)
+}
+}
+}

-return resStates
+return states
}

-func getResourceStates(app *argoappv1.Application, selectedResources []argoappv1.SyncOperationResource) map[string]*resourceState {
+func groupResourceStates(app *argoappv1.Application, selectedResources []argoappv1.SyncOperationResource) map[string]*resourceState {
resStates := make(map[string]*resourceState)
-for _, res := range app.Status.Resources {
-if len(selectedResources) > 0 && !argo.ContainsSyncResource(res.Name, res.GroupVersionKind(), selectedResources) {
-continue
-}
-newState := newResourceStateFromStatus(&res)
-key := newState.Key()
+for _, result := range getResourceStates(app, selectedResources) {
+key := result.Key()
if prev, ok := resStates[key]; ok {
-prev.Merge(newState)
+prev.Merge(result)
} else {
-resStates[key] = newState
+resStates[key] = result
}
}
return resStates
@@ -1284,7 +1341,7 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
if refresh {
conn, appClient := acdClient.NewApplicationClientOrDie()
refreshType := string(argoappv1.RefreshTypeNormal)
-app, err = appClient.Get(context.Background(), &application.ApplicationQuery{Name: &appName, Refresh: &refreshType})
+app, err = appClient.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: &refreshType})
errors.CheckError(err)
_ = conn.Close()
}
@@ -1299,7 +1356,7 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
if len(app.Status.Resources) > 0 {
fmt.Println()
w := tabwriter.NewWriter(os.Stdout, 5, 0, 2, ' ', 0)
-printAppResources(w, app, watchOperation)
+printAppResources(w, app)
_ = w.Flush()
}
}
@@ -1311,13 +1368,13 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
}

w := tabwriter.NewWriter(os.Stdout, 5, 0, 2, ' ', 0)
-fmt.Fprintf(w, waitFormatString, "TIMESTAMP", "GROUP", "KIND", "NAMESPACE", "NAME", "STATUS", "HEALTH", "HOOK", "MESSAGE")
+_, _ = fmt.Fprintf(w, waitFormatString, "TIMESTAMP", "GROUP", "KIND", "NAMESPACE", "NAME", "STATUS", "HEALTH", "HOOK", "MESSAGE")

prevStates := make(map[string]*resourceState)
appEventCh := acdClient.WatchApplicationWithRetry(ctx, appName)
conn, appClient := acdClient.NewApplicationClientOrDie()
defer util.Close(conn)
-app, err := appClient.Get(ctx, &application.ApplicationQuery{Name: &appName})
+app, err := appClient.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName})
errors.CheckError(err)

for appEvent := range appEventCh {
@@ -1343,12 +1400,12 @@ func waitOnApplicationStatus(acdClient apiclient.Client, appName string, timeout
selectedResourcesAreReady = checkResourceStatus(watchSync, watchHealth, watchOperation, watchSuspended, app.Status.Health.Status, string(app.Status.Sync.Status), appEvent.Application.Operation)
}

-if len(app.Status.GetErrorConditions()) == 0 && selectedResourcesAreReady {
+if selectedResourcesAreReady {
printFinalStatus(app)
return app, nil
}

-newStates := calculateResourceStates(app, selectedResources)
+newStates := groupResourceStates(app, selectedResources)
for _, newState := range newStates {
var doPrint bool
stateKey := newState.Key()
@@ -1429,6 +1486,7 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
if app.Spec.Source.Helm == nil {
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{}
}
+re := regexp.MustCompile(`([^\\]),`)
for _, paramStr := range parameters {
parts := strings.SplitN(paramStr, "=", 2)
if len(parts) != 2 {
@@ -1436,7 +1494,7 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
}
newParam := argoappv1.HelmParameter{
Name: parts[0],
-Value: parts[1],
+Value: re.ReplaceAllString(parts[1], `$1\,`),
}
found := false
for i, cp := range app.Spec.Source.Helm.Parameters {
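The new regular expression escapes bare commas in Helm parameter values, so a value that is itself a comma-separated list survives Helm's own `--set` splitting; commas that are already escaped are left alone. A runnable check of exactly that behavior:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pattern from the change above: any character other than a
	// backslash, followed by a comma. Replacing with `$1\,` escapes
	// the comma while keeping the preceding character.
	re := regexp.MustCompile(`([^\\]),`)
	fmt.Println(re.ReplaceAllString("a,b,c", `$1\,`)) // a\,b\,c
	fmt.Println(re.ReplaceAllString(`a\,b`, `$1\,`))  // a\,b (already escaped, unchanged)
}
```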
@@ -1471,7 +1529,7 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
appName := args[0]
-app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName})
+app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName})
errors.CheckError(err)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
fmt.Fprintf(w, "ID\tDATE\tREVISION\n")
@@ -1510,7 +1568,7 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
conn, appIf := acdClient.NewApplicationClientOrDie()
defer util.Close(conn)
ctx := context.Background()
-app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: &appName})
+app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName})
errors.CheckError(err)
var depInfo *argoappv1.RevisionHistory
for _, di := range app.Status.History {
@@ -1523,7 +1581,7 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
log.Fatalf("Application '%s' does not have deployment id '%d' in history\n", app.ObjectMeta.Name, depID)
}

-_, err = appIf.Rollback(ctx, &application.ApplicationRollbackRequest{
+_, err = appIf.Rollback(ctx, &applicationpkg.ApplicationRollbackRequest{
Name: &appName,
ID: int64(depID),
Prune: prune,
@@ -1583,14 +1641,14 @@ func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cob
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
ctx := context.Background()
-resources, err := appIf.ManagedResources(context.Background(), &application.ResourcesQuery{ApplicationName: &appName})
+resources, err := appIf.ManagedResources(context.Background(), &applicationpkg.ResourcesQuery{ApplicationName: &appName})
errors.CheckError(err)

var unstructureds []*unstructured.Unstructured
switch source {
case "git":
if revision != "" {
-q := application.ApplicationManifestQuery{
+q := applicationpkg.ApplicationManifestQuery{
Name: &appName,
Revision: revision,
}
@@ -1641,7 +1699,7 @@ func NewApplicationTerminateOpCommand(clientOpts *argocdclient.ClientOptions) *c
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
ctx := context.Background()
-_, err := appIf.TerminateOperation(ctx, &application.OperationTerminateRequest{Name: &appName})
+_, err := appIf.TerminateOperation(ctx, &applicationpkg.OperationTerminateRequest{Name: &appName})
errors.CheckError(err)
fmt.Printf("Application '%s' operation terminating\n", appName)
},
@@ -1661,7 +1719,7 @@ func NewApplicationEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
appName := args[0]
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
-app, err := appIf.Get(context.Background(), &application.ApplicationQuery{Name: &appName})
+app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName})
errors.CheckError(err)
appData, err := json.Marshal(app.Spec)
errors.CheckError(err)
@@ -1678,7 +1736,7 @@ func NewApplicationEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
if err != nil {
return err
}
-_, err = appIf.UpdateSpec(context.Background(), &application.ApplicationUpdateSpecRequest{Name: &app.Name, Spec: updatedSpec})
+_, err = appIf.UpdateSpec(context.Background(), &applicationpkg.ApplicationUpdateSpecRequest{Name: &app.Name, Spec: updatedSpec})
if err != nil {
return fmt.Errorf("Failed to update application spec:\n%v", err)
}
@@ -1704,7 +1762,7 @@ func NewApplicationPatchCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)

-patchedApp, err := appIf.Patch(context.Background(), &application.ApplicationPatchRequest{
+patchedApp, err := appIf.Patch(context.Background(), &applicationpkg.ApplicationPatchRequest{
Name: &appName,
Patch: patch,
})
@@ -1727,6 +1785,9 @@ func filterResources(command *cobra.Command, resources []*argoappv1.ResourceDiff
filteredObjects := make([]*unstructured.Unstructured, 0)
for i := range liveObjs {
obj := liveObjs[i]
+if obj == nil {
+continue
+}
gvk := obj.GroupVersionKind()
if command.Flags().Changed("group") && group != gvk.Group {
continue
@@ -1792,13 +1853,13 @@ func NewApplicationPatchResourceCommand(clientOpts *argocdclient.ClientOptions)
conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
defer util.Close(conn)
ctx := context.Background()
-resources, err := appIf.ManagedResources(ctx, &application.ResourcesQuery{ApplicationName: &appName})
+resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName})
errors.CheckError(err)
objectsToPatch := filterResources(command, resources.Items, group, kind, namespace, resourceName, all)
for i := range objectsToPatch {
obj := objectsToPatch[i]
gvk := obj.GroupVersionKind()
-_, err = appIf.PatchResource(ctx, &application.ApplicationResourcePatchRequest{
+_, err = appIf.PatchResource(ctx, &applicationpkg.ApplicationResourcePatchRequest{
Name: &appName,
Namespace: obj.GetNamespace(),
ResourceName: obj.GetName(),
@@ -11,8 +11,8 @@ import (

 "github.com/argoproj/argo-cd/errors"
 argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+applicationpkg "github.com/argoproj/argo-cd/pkg/apiclient/application"
 argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
-"github.com/argoproj/argo-cd/server/application"
 "github.com/argoproj/argo-cd/util"
 )

@@ -51,14 +51,14 @@ func NewApplicationResourceActionsListCommand(clientOpts *argocdclient.ClientOpt
 conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
 defer util.Close(conn)
 ctx := context.Background()
-resources, err := appIf.ManagedResources(ctx, &application.ResourcesQuery{ApplicationName: &appName})
+resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName})
 errors.CheckError(err)
 filteredObjects := filterResources(command, resources.Items, group, kind, namespace, resourceName, all)
 availableActions := make(map[string][]argoappv1.ResourceAction)
 for i := range filteredObjects {
 obj := filteredObjects[i]
 gvk := obj.GroupVersionKind()
-availActionsForResource, err := appIf.ListResourceActions(ctx, &application.ApplicationResourceRequest{
+availActionsForResource, err := appIf.ListResourceActions(ctx, &applicationpkg.ApplicationResourceRequest{
 Name: &appName,
 Namespace: obj.GetNamespace(),
 ResourceName: obj.GetName(),

@@ -128,14 +128,14 @@ func NewApplicationResourceActionsRunCommand(clientOpts *argocdclient.ClientOpti
 conn, appIf := argocdclient.NewClientOrDie(clientOpts).NewApplicationClientOrDie()
 defer util.Close(conn)
 ctx := context.Background()
-resources, err := appIf.ManagedResources(ctx, &application.ResourcesQuery{ApplicationName: &appName})
+resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName})
 errors.CheckError(err)
 filteredObjects := filterResources(command, resources.Items, group, kind, namespace, resourceName, all)
 for i := range filteredObjects {
 obj := filteredObjects[i]
 gvk := obj.GroupVersionKind()
 objResourceName := obj.GetName()
-_, err := appIf.RunResourceAction(context.Background(), &application.ResourceActionRunRequest{
+_, err := appIf.RunResourceAction(context.Background(), &applicationpkg.ResourceActionRunRequest{
 Name: &appName,
 Namespace: obj.GetNamespace(),
 ResourceName: objResourceName,
cmd/argocd/commands/app_test.go (new file, 25 lines)
@@ -0,0 +1,25 @@
+package commands
+
+import (
+"testing"
+
+"github.com/stretchr/testify/assert"
+)
+
+func TestParseLabels(t *testing.T) {
+validLabels := []string{"key=value", "foo=bar", "intuit=inc"}
+
+result, err := parseLabels(validLabels)
+assert.NoError(t, err)
+assert.Len(t, result, 3)
+
+invalidLabels := []string{"key=value", "too=many=equals"}
+_, err = parseLabels(invalidLabels)
+assert.Error(t, err)
+
+emptyLabels := []string{}
+result, err = parseLabels(emptyLabels)
+assert.NoError(t, err)
+assert.Len(t, result, 0)
+
+}
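Note: the parseLabels helper exercised by TestParseLabels is not included in the hunks shown here. A minimal sketch consistent with the assertions above — splitting each name=value pair and rejecting entries with more than one '=' — could look like the following; the map[string]string return type is an assumption, not confirmed by this diff:

    func parseLabels(labels []string) (map[string]string, error) {
        result := make(map[string]string, len(labels))
        for _, label := range labels {
            parts := strings.Split(label, "=")
            if len(parts) != 2 {
                // "too=many=equals" splits into three parts and is rejected,
                // matching the assert.Error expectation in the test.
                return nil, fmt.Errorf("expected a single '=' in label %q", label)
            }
            result[parts[0]] = parts[1]
        }
        return result, nil
    }

An empty input slice yields an empty, non-nil map, which satisfies the final assert.Len(t, result, 0) check.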
@@ -19,9 +19,10 @@ import (
 "github.com/argoproj/argo-cd/common"
 "github.com/argoproj/argo-cd/errors"
 argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+clusterpkg "github.com/argoproj/argo-cd/pkg/apiclient/cluster"
 argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
-"github.com/argoproj/argo-cd/server/cluster"
 "github.com/argoproj/argo-cd/util"
+"github.com/argoproj/argo-cd/util/clusterauth"
 )

 // NewClusterCommand returns a new instance of an `argocd cluster` command

@@ -39,16 +40,18 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
 command.AddCommand(NewClusterGetCommand(clientOpts))
 command.AddCommand(NewClusterListCommand(clientOpts))
 command.AddCommand(NewClusterRemoveCommand(clientOpts))
+command.AddCommand(NewClusterRotateAuthCommand(clientOpts))
 return command
 }

 // NewClusterAddCommand returns a new instance of an `argocd cluster add` command
 func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
 var (
-inCluster      bool
-upsert         bool
-awsRoleArn     string
-awsClusterName string
+inCluster       bool
+upsert          bool
+awsRoleArn      string
+awsClusterName  string
+systemNamespace string
 )
 var command = &cobra.Command{
 Use: "add",

@@ -85,7 +88,7 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
 // Install RBAC resources for managing the cluster
 clientset, err := kubernetes.NewForConfig(conf)
 errors.CheckError(err)
-managerBearerToken, err = common.InstallClusterManagerRBAC(clientset)
+managerBearerToken, err = clusterauth.InstallClusterManagerRBAC(clientset, systemNamespace)
 errors.CheckError(err)
 }
 conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()

@@ -94,7 +97,7 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
 if inCluster {
 clst.Server = common.KubernetesInternalAPIServerAddr
 }
-clstCreateReq := cluster.ClusterCreateRequest{
+clstCreateReq := clusterpkg.ClusterCreateRequest{
 Cluster: clst,
 Upsert: upsert,
 }

@@ -108,6 +111,7 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
 command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing cluster with the same name even if the spec differs")
 command.Flags().StringVar(&awsClusterName, "aws-cluster-name", "", "AWS Cluster name if set then aws-iam-authenticator will be used to access cluster")
 command.Flags().StringVar(&awsRoleArn, "aws-role-arn", "", "Optional AWS role arn. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.")
+command.Flags().StringVar(&systemNamespace, "system-namespace", common.DefaultSystemNamespace, "Use different system namespace")
 return command
 }

@@ -154,20 +158,8 @@ func NewCluster(name string, conf *rest.Config, managerBearerToken string, awsAu
 tlsClientConfig := argoappv1.TLSClientConfig{
 Insecure: conf.TLSClientConfig.Insecure,
 ServerName: conf.TLSClientConfig.ServerName,
+CertData: conf.TLSClientConfig.CertData,
+KeyData: conf.TLSClientConfig.KeyData,
+CAData: conf.TLSClientConfig.CAData,
 }
-if len(conf.TLSClientConfig.CertData) == 0 && conf.TLSClientConfig.CertFile != "" {
-data, err := ioutil.ReadFile(conf.TLSClientConfig.CertFile)
-errors.CheckError(err)
-tlsClientConfig.CertData = data
-}
-if len(conf.TLSClientConfig.KeyData) == 0 && conf.TLSClientConfig.KeyFile != "" {
-data, err := ioutil.ReadFile(conf.TLSClientConfig.KeyFile)
-errors.CheckError(err)
-tlsClientConfig.KeyData = data
-}
-if len(conf.TLSClientConfig.CAData) == 0 && conf.TLSClientConfig.CAFile != "" {
-data, err := ioutil.ReadFile(conf.TLSClientConfig.CAFile)
-errors.CheckError(err)
-tlsClientConfig.CAData = data
-}

@@ -188,7 +180,7 @@ func NewCluster(name string, conf *rest.Config, managerBearerToken string, awsAu
 // NewClusterGetCommand returns a new instance of an `argocd cluster get` command
 func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 var command = &cobra.Command{
-Use: "get",
+Use: "get CLUSTER",
 Short: "Get cluster information",
 Run: func(c *cobra.Command, args []string) {
 if len(args) == 0 {

@@ -198,7 +190,7 @@ func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
 conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
 defer util.Close(conn)
 for _, clusterName := range args {
-clst, err := clusterIf.Get(context.Background(), &cluster.ClusterQuery{Server: clusterName})
+clst, err := clusterIf.Get(context.Background(), &clusterpkg.ClusterQuery{Server: clusterName})
 errors.CheckError(err)
 yamlBytes, err := yaml.Marshal(clst)
 errors.CheckError(err)

@@ -212,7 +204,7 @@ func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
 // NewClusterRemoveCommand returns a new instance of an `argocd cluster list` command
 func NewClusterRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 var command = &cobra.Command{
-Use: "rm",
+Use: "rm CLUSTER",
 Short: "Remove cluster credentials",
 Run: func(c *cobra.Command, args []string) {
 if len(args) == 0 {

@@ -227,9 +219,9 @@ func NewClusterRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm

 for _, clusterName := range args {
 // TODO(jessesuen): find the right context and remove manager RBAC artifacts
-// err := common.UninstallClusterManagerRBAC(clientset)
+// err := clusterauth.UninstallClusterManagerRBAC(clientset)
 // errors.CheckError(err)
-_, err := clusterIf.Delete(context.Background(), &cluster.ClusterQuery{Server: clusterName})
+_, err := clusterIf.Delete(context.Background(), &clusterpkg.ClusterQuery{Server: clusterName})
 errors.CheckError(err)
 }
 },

@@ -245,7 +237,7 @@ func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
 Run: func(c *cobra.Command, args []string) {
 conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
 defer util.Close(conn)
-clusters, err := clusterIf.List(context.Background(), &cluster.ClusterQuery{})
+clusters, err := clusterIf.List(context.Background(), &clusterpkg.ClusterQuery{})
 errors.CheckError(err)
 w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
 fmt.Fprintf(w, "SERVER\tNAME\tSTATUS\tMESSAGE\n")

@@ -257,3 +249,26 @@ func NewClusterListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
 }
 return command
 }
+
+// NewClusterRotateAuthCommand returns a new instance of an `argocd cluster rotate-auth` command
+func NewClusterRotateAuthCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
+var command = &cobra.Command{
+Use: "rotate-auth CLUSTER",
+Short: fmt.Sprintf("%s cluster rotate-auth CLUSTER", cliName),
+Run: func(c *cobra.Command, args []string) {
+if len(args) != 1 {
+c.HelpFunc()(c, args)
+os.Exit(1)
+}
+conn, clusterIf := argocdclient.NewClientOrDie(clientOpts).NewClusterClientOrDie()
+defer util.Close(conn)
+clusterQuery := clusterpkg.ClusterQuery{
+Server: args[0],
+}
+_, err := clusterIf.RotateAuth(context.Background(), &clusterQuery)
+errors.CheckError(err)
+fmt.Printf("Cluster '%s' rotated auth\n", clusterQuery.Server)
+},
+}
+return command
+}
@@ -8,6 +8,8 @@ import (
 "strings"
 "text/tabwriter"

+"github.com/spf13/pflag"
+
 log "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"

@@ -18,16 +20,36 @@ import (
 // NewContextCommand returns a new instance of an `argocd ctx` command
 func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
+var delete bool
 var command = &cobra.Command{
 Use: "context",
 Aliases: []string{"ctx"},
 Short: "Switch between contexts",
 Run: func(c *cobra.Command, args []string) {
+
+localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
+errors.CheckError(err)
+
+deletePresentContext := false
+c.Flags().Visit(func(f *pflag.Flag) {
+if f.Name == "delete" {
+deletePresentContext = true
+}
+})
+
 if len(args) == 0 {
-printArgoCDContexts(clientOpts.ConfigPath)
-return
+if deletePresentContext {
+err := deleteContext(localCfg.CurrentContext, clientOpts.ConfigPath)
+errors.CheckError(err)
+return
+} else {
+printArgoCDContexts(clientOpts.ConfigPath)
+return
+}
 }

 ctxName := args[0]

 argoCDDir, err := localconfig.DefaultConfigDir()
 errors.CheckError(err)
 prevCtxFile := path.Join(argoCDDir, ".prev-ctx")

@@ -37,8 +59,6 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 errors.CheckError(err)
 ctxName = string(prevCtxBytes)
 }
-localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
-errors.CheckError(err)
 if localCfg.CurrentContext == ctxName {
 fmt.Printf("Already at context '%s'\n", localCfg.CurrentContext)
 return

@@ -48,6 +68,7 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 }
 prevCtx := localCfg.CurrentContext
 localCfg.CurrentContext = ctxName
+
 err = localconfig.WriteLocalConfig(*localCfg, clientOpts.ConfigPath)
 errors.CheckError(err)
 err = ioutil.WriteFile(prevCtxFile, []byte(prevCtx), 0644)

@@ -55,9 +76,43 @@ func NewContextCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 fmt.Printf("Switched to context '%s'\n", localCfg.CurrentContext)
 },
 }
+command.Flags().BoolVar(&delete, "delete", false, "Delete the context instead of switching to it")
 return command
 }

+func deleteContext(context, configPath string) error {
+
+localCfg, err := localconfig.ReadLocalConfig(configPath)
+errors.CheckError(err)
+if localCfg == nil {
+return fmt.Errorf("Nothing to logout from")
+}
+
+serverName, ok := localCfg.RemoveContext(context)
+if !ok {
+return fmt.Errorf("Context %s does not exist", context)
+}
+_ = localCfg.RemoveUser(context)
+_ = localCfg.RemoveServer(serverName)
+
+if localCfg.IsEmpty() {
+err = localconfig.DeleteLocalConfig(configPath)
+errors.CheckError(err)
+} else {
+if localCfg.CurrentContext == context {
+localCfg.CurrentContext = localCfg.Contexts[0].Name
+}
+err = localconfig.ValidateLocalConfig(*localCfg)
+if err != nil {
+return fmt.Errorf("Error in logging out")
+}
+err = localconfig.WriteLocalConfig(*localCfg, configPath)
+errors.CheckError(err)
+}
+fmt.Printf("Context '%s' deleted\n", context)
+return nil
+}
+
 func printArgoCDContexts(configPath string) {
 localCfg, err := localconfig.ReadLocalConfig(configPath)
 errors.CheckError(err)
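A note on the flag handling above: because --delete is bound to the local delete variable with BoolVar, the c.Flags().Visit walk is one way to discover whether the flag was set; pflag also exposes this directly through Changed, so an equivalent formulation (a sketch, not what this commit ships) would be:

    // Inside the Run closure; c.Flags() returns the *pflag.FlagSet, and
    // Changed reports whether the user explicitly set --delete.
    deletePresentContext := c.Flags().Changed("delete")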
cmd/argocd/commands/context_test.go (new file, 60 lines)
@@ -0,0 +1,60 @@
+package commands
+
+import (
+"io/ioutil"
+"os"
+"testing"
+
+"github.com/stretchr/testify/assert"
+
+"github.com/argoproj/argo-cd/util/localconfig"
+)
+
+const testConfig = `contexts:
+- name: argocd.example.com:443
+  server: argocd.example.com:443
+  user: argocd.example.com:443
+- name: localhost:8080
+  server: localhost:8080
+  user: localhost:8080
+current-context: localhost:8080
+servers:
+- server: argocd.example.com:443
+- plain-text: true
+  server: localhost:8080
+users:
+- auth-token: vErrYS3c3tReFRe$hToken
+  name: argocd.example.com:443
+  refresh-token: vErrYS3c3tReFRe$hToken
+- auth-token: vErrYS3c3tReFRe$hToken
+  name: localhost:8080`
+
+const testConfigFilePath = "./testdata/config"
+
+func TestContextDelete(t *testing.T) {
+
+// Write the test config file
+err := ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
+assert.NoError(t, err)
+
+localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
+assert.NoError(t, err)
+assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
+assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
+
+err = deleteContext("localhost:8080", testConfigFilePath)
+assert.NoError(t, err)
+
+localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
+assert.NoError(t, err)
+assert.Equal(t, localConfig.CurrentContext, "argocd.example.com:443")
+assert.NotContains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
+assert.NotContains(t, localConfig.Servers, localconfig.Server{PlainText: true, Server: "localhost:8080"})
+assert.NotContains(t, localConfig.Users, localconfig.User{AuthToken: "vErrYS3c3tReFRe$hToken", Name: "localhost:8080"})
+assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd.example.com:443", Server: "argocd.example.com:443", User: "argocd.example.com:443"})
+
+// Write the file again so that no conflicts are made in git
+err = ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
+assert.NoError(t, err)
+
+}
@@ -8,8 +8,8 @@ import (
 "strconv"
 "time"

-oidc "github.com/coreos/go-oidc"
-jwt "github.com/dgrijalva/jwt-go"
+"github.com/coreos/go-oidc"
+"github.com/dgrijalva/jwt-go"
 log "github.com/sirupsen/logrus"
 "github.com/skratchdot/open-golang/open"
 "github.com/spf13/cobra"

@@ -17,8 +17,8 @@ import (

 "github.com/argoproj/argo-cd/errors"
 argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+sessionpkg "github.com/argoproj/argo-cd/pkg/apiclient/session"
+settingspkg "github.com/argoproj/argo-cd/pkg/apiclient/settings"
-"github.com/argoproj/argo-cd/server/session"
-"github.com/argoproj/argo-cd/server/settings"
 "github.com/argoproj/argo-cd/util"
 "github.com/argoproj/argo-cd/util/cli"
 grpc_util "github.com/argoproj/argo-cd/util/grpc"

@@ -88,7 +88,7 @@ func NewLoginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comman
 httpClient, err := acdClient.HTTPClient()
 errors.CheckError(err)
 ctx = oidc.ClientContext(ctx, httpClient)
-acdSet, err := setIf.Get(ctx, &settings.SettingsQuery{})
+acdSet, err := setIf.Get(ctx, &settingspkg.SettingsQuery{})
 errors.CheckError(err)
 oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
 errors.CheckError(err)

@@ -278,7 +278,7 @@ func passwordLogin(acdClient argocdclient.Client, username, password string) str
 username, password = cli.PromptCredentials(username, password)
 sessConn, sessionIf := acdClient.NewSessionClientOrDie()
 defer util.Close(sessConn)
-sessionRequest := session.SessionCreateRequest{
+sessionRequest := sessionpkg.SessionCreateRequest{
 Username: username,
 Password: password,
 }
cmd/argocd/commands/logout.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+package commands
+
+import (
+"fmt"
+"os"
+
+log "github.com/sirupsen/logrus"
+"github.com/spf13/cobra"
+
+"github.com/argoproj/argo-cd/errors"
+argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+"github.com/argoproj/argo-cd/util/localconfig"
+)
+
+// NewLogoutCommand returns a new instance of `argocd logout` command
+func NewLogoutCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Command {
+var command = &cobra.Command{
+Use: "logout CONTEXT",
+Short: "Log out from Argo CD",
+Long: "Log out from Argo CD",
+Run: func(c *cobra.Command, args []string) {
+if len(args) == 0 {
+c.HelpFunc()(c, args)
+os.Exit(1)
+}
+context := args[0]
+
+localCfg, err := localconfig.ReadLocalConfig(globalClientOpts.ConfigPath)
+errors.CheckError(err)
+if localCfg == nil {
+log.Fatalf("Nothing to logout from")
+}
+
+ok := localCfg.RemoveToken(context)
+if !ok {
+log.Fatalf("Context %s does not exist", context)
+}
+
+err = localconfig.ValidateLocalConfig(*localCfg)
+if err != nil {
+log.Fatalf("Error in logging out: %s", err)
+}
+err = localconfig.WriteLocalConfig(*localCfg, globalClientOpts.ConfigPath)
+errors.CheckError(err)
+
+fmt.Printf("Logged out from '%s'\n", context)
+},
+}
+return command
+}
cmd/argocd/commands/logout_test.go (new file, 39 lines)
@@ -0,0 +1,39 @@
+package commands
+
+import (
+"io/ioutil"
+"os"
+"testing"
+
+"github.com/argoproj/argo-cd/pkg/apiclient"
+
+"github.com/stretchr/testify/assert"
+
+"github.com/argoproj/argo-cd/util/localconfig"
+)
+
+func TestLogout(t *testing.T) {
+
+// Write the test config file
+err := ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
+assert.NoError(t, err)
+
+localConfig, err := localconfig.ReadLocalConfig(testConfigFilePath)
+assert.NoError(t, err)
+assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
+assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "localhost:8080", Server: "localhost:8080", User: "localhost:8080"})
+
+command := NewLogoutCommand(&apiclient.ClientOptions{ConfigPath: testConfigFilePath})
+command.Run(nil, []string{"localhost:8080"})
+
+localConfig, err = localconfig.ReadLocalConfig(testConfigFilePath)
+assert.NoError(t, err)
+assert.Equal(t, localConfig.CurrentContext, "localhost:8080")
+assert.NotContains(t, localConfig.Users, localconfig.User{AuthToken: "vErrYS3c3tReFRe$hToken", Name: "localhost:8080"})
+assert.Contains(t, localConfig.Contexts, localconfig.ContextRef{Name: "argocd.example.com:443", Server: "argocd.example.com:443", User: "argocd.example.com:443"})
+
+// Write the file again so that no conflicts are made in git
+err = ioutil.WriteFile(testConfigFilePath, []byte(testConfig), os.ModePerm)
+assert.NoError(t, err)
+
+}
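The test above pins down the contract of logout: unlike `argocd context --delete`, which removes the context, server, and user entries, RemoveToken only clears the stored credentials, and the context itself survives (CurrentContext is still localhost:8080 afterwards). RemoveToken is not shown in this diff; a hypothetical sketch consistent with the assertions might be:

    // Hypothetical; field and type names mirror the localconfig values used
    // in the tests, but the real implementation may differ.
    func (cfg *LocalConfig) RemoveToken(serverName string) bool {
        for i := range cfg.Users {
            if cfg.Users[i].Name == serverName {
                cfg.Users[i].AuthToken = ""
                cfg.Users[i].RefreshToken = ""
                return true
            }
        }
        return false
    }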
@@ -19,8 +19,8 @@ import (

 "github.com/argoproj/argo-cd/errors"
 argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+projectpkg "github.com/argoproj/argo-cd/pkg/apiclient/project"
 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
-"github.com/argoproj/argo-cd/server/project"
 "github.com/argoproj/argo-cd/util"
 "github.com/argoproj/argo-cd/util/cli"
 "github.com/argoproj/argo-cd/util/git"

@@ -125,7 +125,7 @@ func NewProjectCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-_, err := projIf.Create(context.Background(), &project.ProjectCreateRequest{Project: &proj})
+_, err := projIf.Create(context.Background(), &projectpkg.ProjectCreateRequest{Project: &proj})
 errors.CheckError(err)
 },
 }

@@ -150,7 +150,7 @@ func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 visited := 0

@@ -171,7 +171,7 @@ func NewProjectSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
 os.Exit(1)
 }

-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 },
 }

@@ -195,7 +195,7 @@ func NewProjectAddDestinationCommand(clientOpts *argocdclient.ClientOptions) *co
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 for _, dest := range proj.Spec.Destinations {

@@ -204,7 +204,7 @@ func NewProjectAddDestinationCommand(clientOpts *argocdclient.ClientOptions) *co
 }
 }
 proj.Spec.Destinations = append(proj.Spec.Destinations, v1alpha1.ApplicationDestination{Server: server, Namespace: namespace})
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 },
 }

@@ -227,7 +227,7 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions)
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 index := -1

@@ -241,7 +241,7 @@ func NewProjectRemoveDestinationCommand(clientOpts *argocdclient.ClientOptions)
 log.Fatal("Specified destination does not exist in project")
 } else {
 proj.Spec.Destinations = append(proj.Spec.Destinations[:index], proj.Spec.Destinations[index+1:]...)
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 }
 },

@@ -265,7 +265,7 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 for _, item := range proj.Spec.SourceRepos {

@@ -279,7 +279,7 @@ func NewProjectAddSourceCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
 }
 }
 proj.Spec.SourceRepos = append(proj.Spec.SourceRepos, url)
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 },
 }

@@ -299,11 +299,11 @@ func modifyProjectResourceCmd(cmdUse, cmdDesc string, clientOpts *argocdclient.C
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 if action(proj, group, kind) {
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 }
 },

@@ -399,7 +399,7 @@ func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobr
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 index := -1

@@ -413,7 +413,7 @@ func NewProjectRemoveSourceCommand(clientOpts *argocdclient.ClientOptions) *cobr
 fmt.Printf("Source repository '%s' does not exist in project\n", url)
 } else {
 proj.Spec.SourceRepos = append(proj.Spec.SourceRepos[:index], proj.Spec.SourceRepos[index+1:]...)
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 }
 },

@@ -435,7 +435,7 @@ func NewProjectDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comm
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)
 for _, name := range args {
-_, err := projIf.Delete(context.Background(), &project.ProjectQuery{Name: name})
+_, err := projIf.Delete(context.Background(), &projectpkg.ProjectQuery{Name: name})
 errors.CheckError(err)
 }
 },

@@ -451,7 +451,7 @@ func NewProjectListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
 Run: func(c *cobra.Command, args []string) {
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)
-projects, err := projIf.List(context.Background(), &project.ProjectQuery{})
+projects, err := projIf.List(context.Background(), &projectpkg.ProjectQuery{})
 errors.CheckError(err)
 w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
 fmt.Fprintf(w, "NAME\tDESCRIPTION\tDESTINATIONS\tSOURCES\tCLUSTER-RESOURCE-WHITELIST\tNAMESPACE-RESOURCE-BLACKLIST\n")

@@ -513,7 +513,7 @@ func NewProjectGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
 projName := args[0]
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)
-p, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+p, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)
 fmt.Printf(printProjFmtStr, "Name:", p.Name)
 fmt.Printf(printProjFmtStr, "Description:", p.Spec.Description)

@@ -574,7 +574,7 @@ func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
 projName := args[0]
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)
-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)
 projData, err := json.Marshal(proj.Spec)
 errors.CheckError(err)

@@ -591,12 +591,12 @@ func NewProjectEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comman
 if err != nil {
 return err
 }
-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 if err != nil {
 return err
 }
 proj.Spec = updatedSpec
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 if err != nil {
 return fmt.Errorf("Failed to update project:\n%v", err)
 }
@@ -12,8 +12,8 @@ import (

 "github.com/argoproj/argo-cd/errors"
 argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+projectpkg "github.com/argoproj/argo-cd/pkg/apiclient/project"
 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
-"github.com/argoproj/argo-cd/server/project"
 "github.com/argoproj/argo-cd/util"
 projectutil "github.com/argoproj/argo-cd/util/project"
 )

@@ -61,7 +61,7 @@ func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cob
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 role, roleIndex, err := projectutil.GetRoleByName(proj, roleName)

@@ -70,7 +70,7 @@ func NewProjectRoleAddPolicyCommand(clientOpts *argocdclient.ClientOptions) *cob
 policy := fmt.Sprintf(policyTemplate, proj.Name, role.Name, opts.action, proj.Name, opts.object, opts.permission)
 proj.Spec.Roles[roleIndex].Policies = append(role.Policies, policy)

-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 },
 }

@@ -96,7 +96,7 @@ func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) *
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 role, roleIndex, err := projectutil.GetRoleByName(proj, roleName)

@@ -115,7 +115,7 @@ func NewProjectRoleRemovePolicyCommand(clientOpts *argocdclient.ClientOptions) *
 }
 role.Policies[duplicateIndex] = role.Policies[len(role.Policies)-1]
 proj.Spec.Roles[roleIndex].Policies = role.Policies[:len(role.Policies)-1]
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 },
 }

@@ -141,7 +141,7 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 _, _, err = projectutil.GetRoleByName(proj, roleName)

@@ -151,7 +151,7 @@ func NewProjectRoleCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
 }
 proj.Spec.Roles = append(proj.Spec.Roles, v1alpha1.ProjectRole{Name: roleName, Description: description})

-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 fmt.Printf("Role '%s' created\n", roleName)
 },

@@ -175,7 +175,7 @@ func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 _, index, err := projectutil.GetRoleByName(proj, roleName)

@@ -186,7 +186,7 @@ func NewProjectRoleDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.
 proj.Spec.Roles[index] = proj.Spec.Roles[len(proj.Spec.Roles)-1]
 proj.Spec.Roles = proj.Spec.Roles[:len(proj.Spec.Roles)-1]

-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 fmt.Printf("Role '%s' deleted\n", roleName)
 },

@@ -213,7 +213,7 @@ func NewProjectRoleCreateTokenCommand(clientOpts *argocdclient.ClientOptions) *c
 defer util.Close(conn)
 duration, err := timeutil.ParseDuration(expiresIn)
 errors.CheckError(err)
-token, err := projIf.CreateToken(context.Background(), &project.ProjectTokenCreateRequest{Project: projName, Role: roleName, ExpiresIn: int64(duration.Seconds())})
+token, err := projIf.CreateToken(context.Background(), &projectpkg.ProjectTokenCreateRequest{Project: projName, Role: roleName, ExpiresIn: int64(duration.Seconds())})
 errors.CheckError(err)
 fmt.Println(token.Token)
 },

@@ -241,7 +241,7 @@ func NewProjectRoleDeleteTokenCommand(clientOpts *argocdclient.ClientOptions) *c
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-_, err = projIf.DeleteToken(context.Background(), &project.ProjectTokenDeleteRequest{Project: projName, Role: roleName, Iat: issuedAt})
+_, err = projIf.DeleteToken(context.Background(), &projectpkg.ProjectTokenDeleteRequest{Project: projName, Role: roleName, Iat: issuedAt})
 errors.CheckError(err)
 },
 }

@@ -262,7 +262,7 @@ func NewProjectRoleListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-project, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+project, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)
 w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
 fmt.Fprintf(w, "ROLE-NAME\tDESCRIPTION\n")

@@ -290,7 +290,7 @@ func NewProjectRoleGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)

-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)

 role, _, err := projectutil.GetRoleByName(proj, roleName)

@@ -331,7 +331,7 @@ func NewProjectRoleAddGroupCommand(clientOpts *argocdclient.ClientOptions) *cobr
 projName, roleName, groupName := args[0], args[1], args[2]
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)
-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)
 updated, err := projectutil.AddGroupToRole(proj, roleName, groupName)
 errors.CheckError(err)

@@ -339,7 +339,7 @@ func NewProjectRoleAddGroupCommand(clientOpts *argocdclient.ClientOptions) *cobr
 fmt.Printf("Group '%s' already present in role '%s'\n", groupName, roleName)
 return
 }
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 fmt.Printf("Group '%s' added to role '%s'\n", groupName, roleName)
 },

@@ -360,7 +360,7 @@ func NewProjectRoleRemoveGroupCommand(clientOpts *argocdclient.ClientOptions) *c
 projName, roleName, groupName := args[0], args[1], args[2]
 conn, projIf := argocdclient.NewClientOrDie(clientOpts).NewProjectClientOrDie()
 defer util.Close(conn)
-proj, err := projIf.Get(context.Background(), &project.ProjectQuery{Name: projName})
+proj, err := projIf.Get(context.Background(), &projectpkg.ProjectQuery{Name: projName})
 errors.CheckError(err)
 updated, err := projectutil.RemoveGroupFromRole(proj, roleName, groupName)
 errors.CheckError(err)

@@ -368,7 +368,7 @@ func NewProjectRoleRemoveGroupCommand(clientOpts *argocdclient.ClientOptions) *c
 fmt.Printf("Group '%s' not present in role '%s'\n", groupName, roleName)
 return
 }
-_, err = projIf.Update(context.Background(), &project.ProjectUpdateRequest{Project: proj})
+_, err = projIf.Update(context.Background(), &projectpkg.ProjectUpdateRequest{Project: proj})
 errors.CheckError(err)
 fmt.Printf("Group '%s' removed from role '%s'\n", groupName, roleName)
 },
@@ -5,13 +5,13 @@ import (
 "fmt"
 "os"

-oidc "github.com/coreos/go-oidc"
+"github.com/coreos/go-oidc"
 log "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"

 "github.com/argoproj/argo-cd/errors"
 argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+settingspkg "github.com/argoproj/argo-cd/pkg/apiclient/settings"
-"github.com/argoproj/argo-cd/server/settings"
 "github.com/argoproj/argo-cd/util"
 "github.com/argoproj/argo-cd/util/localconfig"
 "github.com/argoproj/argo-cd/util/session"

@@ -63,7 +63,7 @@ func NewReloginCommand(globalClientOpts *argocdclient.ClientOptions) *cobra.Comm
 httpClient, err := acdClient.HTTPClient()
 errors.CheckError(err)
 ctx = oidc.ClientContext(ctx, httpClient)
-acdSet, err := setIf.Get(ctx, &settings.SettingsQuery{})
+acdSet, err := setIf.Get(ctx, &settingspkg.SettingsQuery{})
 errors.CheckError(err)
 oauth2conf, provider, err := acdClient.OIDCConfig(ctx, acdSet)
 errors.CheckError(err)
@@ -12,8 +12,8 @@ import (

 "github.com/argoproj/argo-cd/errors"
 argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
+repositorypkg "github.com/argoproj/argo-cd/pkg/apiclient/repository"
 appsv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
-"github.com/argoproj/argo-cd/server/repository"
 "github.com/argoproj/argo-cd/util"
 "github.com/argoproj/argo-cd/util/cli"
 "github.com/argoproj/argo-cd/util/git"

@@ -68,7 +68,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 // See issue #315
 err := git.TestRepo(repo.Repo, "", "", repo.SSHPrivateKey, repo.InsecureIgnoreHostKey)
 if err != nil {
-if git.IsSSHURL(repo.Repo) {
+if yes, _ := git.IsSSHURL(repo.Repo); yes {
 // If we failed using git SSH credentials, then the repo is automatically bad
 log.Fatal(err)
 }

@@ -78,7 +78,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 }
 conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
 defer util.Close(conn)
-repoCreateReq := repository.RepoCreateRequest{
+repoCreateReq := repositorypkg.RepoCreateRequest{
 Repo: &repo,
 Upsert: upsert,
 }

@@ -108,7 +108,7 @@ func NewRepoRemoveCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
 conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
 defer util.Close(conn)
 for _, repoURL := range args {
-_, err := repoIf.Delete(context.Background(), &repository.RepoQuery{Repo: repoURL})
+_, err := repoIf.Delete(context.Background(), &repositorypkg.RepoQuery{Repo: repoURL})
 errors.CheckError(err)
 }
 },

@@ -124,7 +124,7 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 Run: func(c *cobra.Command, args []string) {
 conn, repoIf := argocdclient.NewClientOrDie(clientOpts).NewRepoClientOrDie()
 defer util.Close(conn)
-repos, err := repoIf.List(context.Background(), &repository.RepoQuery{})
+repos, err := repoIf.List(context.Background(), &repositorypkg.RepoQuery{})
 errors.CheckError(err)
 w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
 fmt.Fprintf(w, "REPO\tUSER\tSTATUS\tMESSAGE\n")
|
||||
command.AddCommand(NewContextCommand(&clientOpts))
|
||||
command.AddCommand(NewProjectCommand(&clientOpts))
|
||||
command.AddCommand(NewAccountCommand(&clientOpts))
|
||||
command.AddCommand(NewLogoutCommand(&clientOpts))
|
||||
|
||||
defaultLocalConfigPath, err := localconfig.DefaultLocalConfigPath()
|
||||
errors.CheckError(err)
|
||||
|
||||
cmd/argocd/commands/testdata/config (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
+contexts:
+- name: argocd.example.com:443
+  server: argocd.example.com:443
+  user: argocd.example.com:443
+- name: localhost:8080
+  server: localhost:8080
+  user: localhost:8080
+current-context: localhost:8080
+servers:
+- server: argocd.example.com:443
+- plain-text: true
+  server: localhost:8080
+users:
+- auth-token: vErrYS3c3tReFRe$hToken
+  name: argocd.example.com:443
+  refresh-token: vErrYS3c3tReFRe$hToken
+- auth-token: vErrYS3c3tReFRe$hToken
+  name: localhost:8080
@@ -7,7 +7,7 @@ import (
 "github.com/golang/protobuf/ptypes/empty"
 "github.com/spf13/cobra"

-argocd "github.com/argoproj/argo-cd"
+"github.com/argoproj/argo-cd/common"
 "github.com/argoproj/argo-cd/errors"
 argocdclient "github.com/argoproj/argo-cd/pkg/apiclient"
 "github.com/argoproj/argo-cd/util"

@@ -22,7 +22,7 @@ func NewVersionCmd(clientOpts *argocdclient.ClientOptions) *cobra.Command {
 Use: "version",
 Short: fmt.Sprintf("Print version information"),
 Run: func(cmd *cobra.Command, args []string) {
-version := argocd.GetVersion()
+version := common.GetVersion()
 fmt.Printf("%s: %s\n", cliName, version)
 if !short {
 fmt.Printf(" BuildDate: %s\n", version.BuildDate)
@@ -17,12 +17,18 @@ const (
 ArgoCDRBACConfigMapName = "argocd-rbac-cm"
 )

+// Default system namespace
 const (
-PortAPIServer = 8080
-PortRepoServer = 8081
-PortArgoCDMetrics = 8082
-PortArgoCDAPIServerMetrics = 8083
-PortRepoServerMetrics = 8084
+DefaultSystemNamespace = "kube-system"
 )

+// Default listener ports for ArgoCD components
+const (
+DefaultPortAPIServer = 8080
+DefaultPortRepoServer = 8081
+DefaultPortArgoCDMetrics = 8082
+DefaultPortArgoCDAPIServerMetrics = 8083
+DefaultPortRepoServerMetrics = 8084
+)
+
 // Argo CD application related constants

@@ -75,6 +81,12 @@ const (
 // LabelValueSecretTypeCluster indicates a secret type of cluster
 LabelValueSecretTypeCluster = "cluster"

+// AnnotationCompareOptions is a comma-separated list of options for comparison
+AnnotationCompareOptions = "argocd.argoproj.io/compare-options"
+// AnnotationSyncOptions is a comma-separated list of options for syncing
+AnnotationSyncOptions = "argocd.argoproj.io/sync-options"
+// AnnotationSyncWave indicates which wave of the sync the resource or hook should be in
+AnnotationSyncWave = "argocd.argoproj.io/sync-wave"
 // AnnotationKeyHook contains the hook type of a resource
 AnnotationKeyHook = "argocd.argoproj.io/hook"
 // AnnotationKeyHookDeletePolicy is the policy of deleting a hook
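The three annotation keys added above are plain string constants; consumers read them off live objects with an ordinary map lookup. A small illustrative sketch (the IgnoreExtraneous value is an example, not taken from this diff):

    // annotations as returned by obj.GetAnnotations() on an unstructured object
    annotations := map[string]string{
        common.AnnotationCompareOptions: "IgnoreExtraneous",
    }
    if opts, ok := annotations[common.AnnotationCompareOptions]; ok {
        fmt.Println("compare options:", opts)
    }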
@@ -1,4 +1,4 @@
-package argocd
+package common

 import (
 "fmt"
@@ -4,6 +4,7 @@ import (
 "context"
 "encoding/json"
 "fmt"
+"math"
 "reflect"
 "runtime/debug"
 "strings"

@@ -26,6 +27,7 @@ import (
 statecache "github.com/argoproj/argo-cd/controller/cache"
 "github.com/argoproj/argo-cd/controller/metrics"
 "github.com/argoproj/argo-cd/errors"
+"github.com/argoproj/argo-cd/pkg/apis/application"
 appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
 appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
 appinformers "github.com/argoproj/argo-cd/pkg/client/informers/externalversions"

@@ -45,6 +47,21 @@ const (
 updateOperationStateTimeout = 1 * time.Second
 )

+type CompareWith int
+
+const (
+// Compare live application state against state defined in latest git revision.
+CompareWithLatest CompareWith = 2
+// Compare live application state against state defined using revision of most recent comparison.
+CompareWithRecent CompareWith = 1
+// Skip comparison and only refresh application resources tree
+ComparisonWithNothing CompareWith = 0
+)
+
+func (a CompareWith) Max(b CompareWith) CompareWith {
+return CompareWith(math.Max(float64(a), float64(b)))
+}
+
 // ApplicationController is the controller for application resources.
 type ApplicationController struct {
 cache *argocache.Cache
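The numeric ordering of the CompareWith levels is what makes Max meaningful: repeated refresh requests for one application can only escalate the pending comparison level, never downgrade it. A small demonstration reusing the declarations from the hunk above:

    level := ComparisonWithNothing
    // A managed-resource change arrives: escalate to CompareWithRecent.
    level = CompareWithRecent.Max(level) // level == CompareWithRecent
    // A tree-only refresh arrives afterwards: the level does not drop.
    level = ComparisonWithNothing.Max(level) // still CompareWithRecent
    // A completed sync forces the strongest level.
    level = CompareWithLatest.Max(level) // level == CompareWithLatest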
@@ -65,7 +82,7 @@ type ApplicationController struct {
 db db.ArgoDB
 settings *settings_util.ArgoCDSettings
 settingsMgr *settings_util.SettingsManager
-refreshRequestedApps map[string]bool
+refreshRequestedApps map[string]CompareWith
 refreshRequestedAppsMutex *sync.Mutex
 metricsServer *metrics.MetricsServer
 }

@@ -84,6 +101,7 @@ func NewApplicationController(
 repoClientset reposerver.Clientset,
 argoCache *argocache.Cache,
 appResyncPeriod time.Duration,
+metricsPort int,
 ) (*ApplicationController, error) {
 db := db.NewDB(namespace, settingsMgr, kubeClientset)
 settings, err := settingsMgr.GetSettings()

@@ -102,7 +120,7 @@ func NewApplicationController(
 appOperationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
 db: db,
 statusRefreshTimeout: appResyncPeriod,
-refreshRequestedApps: make(map[string]bool),
+refreshRequestedApps: make(map[string]CompareWith),
 refreshRequestedAppsMutex: &sync.Mutex{},
 auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "argocd-application-controller"),
 settingsMgr: settingsMgr,

@@ -110,21 +128,50 @@ func NewApplicationController(
 }
 appInformer, appLister := ctrl.newApplicationInformerAndLister()
 projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, cache.Indexers{})
-stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settings, kubectlCmd, func(appName string, fullRefresh bool) {
-ctrl.requestAppRefresh(appName, fullRefresh)
-ctrl.appRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, appName))
-})
-appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd, ctrl.settings, stateCache, projInformer)
+metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
+ctrl.metricsServer = metrics.NewMetricsServer(metricsAddr, appLister, func() error {
+_, err := kubeClientset.Discovery().ServerVersion()
+return err
+})
+stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settings, kubectlCmd, ctrl.metricsServer, ctrl.handleAppUpdated)
+appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd, ctrl.settings, stateCache, projInformer, ctrl.metricsServer)
 ctrl.appInformer = appInformer
 ctrl.appLister = appLister
 ctrl.projInformer = projInformer
 ctrl.appStateManager = appStateManager
 ctrl.stateCache = stateCache
-metricsAddr := fmt.Sprintf("0.0.0.0:%d", common.PortArgoCDMetrics)
-ctrl.metricsServer = metrics.NewMetricsServer(metricsAddr, ctrl.appLister)

 return &ctrl, nil
 }

+func isSelfReferencedApp(app *appv1.Application, ref v1.ObjectReference) bool {
+gvk := ref.GroupVersionKind()
+return ref.UID == app.UID &&
+ref.Name == app.Name &&
+ref.Namespace == app.Namespace &&
+gvk.Group == application.Group &&
+gvk.Kind == application.ApplicationKind
+}
+
+func (ctrl *ApplicationController) handleAppUpdated(appName string, isManagedResource bool, ref v1.ObjectReference) {
+skipForceRefresh := false
+
+obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName)
+if app, ok := obj.(*appv1.Application); exists && err == nil && ok && isSelfReferencedApp(app, ref) {
+// Don't force refresh app if related resource is application itself. This prevents infinite reconciliation loop.
+skipForceRefresh = true
+}
+
+if !skipForceRefresh {
+level := ComparisonWithNothing
+if isManagedResource {
+level = CompareWithRecent
+}
+ctrl.requestAppRefresh(appName, level)
+}
+ctrl.appRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, appName))
+}
+
 func (ctrl *ApplicationController) setAppManagedResources(a *appv1.Application, comparisonResult *comparisonResult) (*appv1.ApplicationTree, error) {
 managedResources, err := ctrl.managedResources(a, comparisonResult)
 if err != nil {
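handleAppUpdated replaces the inline closure that NewLiveStateCache previously received; besides skipping self-referencing Application updates (which would otherwise reconcile forever), its essential decision is the refresh level. Restated compactly as a sketch over the CompareWith type from this diff:

    // Managed resources warrant a re-comparison against the last used
    // revision; anything else only refreshes the resource tree.
    func refreshLevel(isManagedResource bool) CompareWith {
        if isManagedResource {
            return CompareWithRecent
        }
        return ComparisonWithNothing
    }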
@@ -168,7 +215,7 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
},
|
||||
})
|
||||
} else {
|
||||
err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode) {
|
||||
err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, live, func(child appv1.ResourceNode) {
|
||||
nodes = append(nodes, child)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -267,20 +314,20 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) requestAppRefresh(appName string, fullRefresh bool) {
|
||||
func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith CompareWith) {
|
||||
ctrl.refreshRequestedAppsMutex.Lock()
|
||||
defer ctrl.refreshRequestedAppsMutex.Unlock()
|
||||
ctrl.refreshRequestedApps[appName] = fullRefresh || ctrl.refreshRequestedApps[appName]
|
||||
ctrl.refreshRequestedApps[appName] = compareWith.Max(ctrl.refreshRequestedApps[appName])
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) isRefreshRequested(appName string) (bool, bool) {
|
||||
func (ctrl *ApplicationController) isRefreshRequested(appName string) (bool, CompareWith) {
|
||||
ctrl.refreshRequestedAppsMutex.Lock()
|
||||
defer ctrl.refreshRequestedAppsMutex.Unlock()
|
||||
fullRefresh, ok := ctrl.refreshRequestedApps[appName]
|
||||
level, ok := ctrl.refreshRequestedApps[appName]
|
||||
if ok {
|
||||
delete(ctrl.refreshRequestedApps, appName)
|
||||
}
|
||||
return ok, fullRefresh
|
||||
return ok, level
|
||||
}
|
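
Note: the CompareWith type used above is defined elsewhere in the controller package and is not part of these hunks. A minimal sketch of the ordered comparison level it implies, assuming integer-backed constants (the names come from the diff itself; the ordering is inferred from Max and from the 'deepest requested comparison level' test further down):

// Assumed definition, for orientation only: an ordered comparison level.
// Higher values request a deeper comparison; Max lets concurrent refresh
// requests merge by keeping the deepest one.
type CompareWith int

const (
    ComparisonWithNothing CompareWith = iota // only rebuild the resource tree from cache
    CompareWithRecent                        // compare against the most recently synced revision
    CompareWithLatest                        // compare against the latest revision of the tracked target
)

// Max returns the deeper of two requested comparison levels.
func (c CompareWith) Max(other CompareWith) CompareWith {
    if c > other {
        return c
    }
    return other
}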

func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext bool) {
@@ -327,6 +374,10 @@ func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext b
	return
}

func shouldBeDeleted(app *appv1.Application, obj *unstructured.Unstructured) bool {
	return !kube.IsCRD(obj) && !isSelfReferencedApp(app, kube.GetObjectRef(obj))
}

func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application) error {
	logCtx := log.WithField("application", app.Name)
	logCtx.Infof("Deleting resources")
@@ -345,13 +396,20 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
	}
	objs := make([]*unstructured.Unstructured, 0)
	for k := range objsMap {
		if objsMap[k].GetDeletionTimestamp() == nil && !kube.IsCRD(objsMap[k]) {
		if objsMap[k].GetDeletionTimestamp() == nil && shouldBeDeleted(app, objsMap[k]) {
			objs = append(objs, objsMap[k])
		}
	}

	cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
	if err != nil {
		return err
	}
	config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig())

	err = util.RunAllAsync(len(objs), func(i int) error {
		obj := objs[i]
		return ctrl.stateCache.Delete(app.Spec.Destination.Server, obj)
		return ctrl.kubectl.DeleteResource(config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), false)
	})
	if err != nil {
		return err
@@ -361,6 +419,11 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
	if err != nil {
		return err
	}
	for k, obj := range objsMap {
		if !shouldBeDeleted(app, obj) {
			delete(objsMap, k)
		}
	}
	if len(objsMap) > 0 {
		logCtx.Infof("%d objects remaining for deletion", len(objsMap))
		return nil
@@ -454,6 +517,7 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
		ctrl.setOperationState(app, state)
		logCtx.Infof("Initialized new operation: %v", *app.Operation)
	}

	ctrl.appStateManager.SyncAppState(app, state)

	if state.Phase == appv1.OperationRunning {
@@ -475,7 +539,7 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
	if state.Phase.Completed() {
		// if we just completed an operation, force a refresh so that the UI will report up-to-date
		// sync/health information
		ctrl.requestAppRefresh(app.ObjectMeta.Name, true)
		ctrl.requestAppRefresh(app.ObjectMeta.Name, CompareWithLatest)
	}
}

@@ -510,6 +574,10 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
	appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace)
	_, err = appClient.Patch(app.Name, types.MergePatchType, patchJSON)
	if err != nil {
		// Stop retrying to update a deleted application
		if apierr.IsNotFound(err) {
			return nil
		}
		return err
	}
	log.Infof("updated '%s' operation (phase: %s)", app.Name, state.Phase)
@@ -566,7 +634,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
		log.Warnf("Key '%s' in index is not an application", appKey)
		return
	}
	needRefresh, refreshType, fullRefresh := ctrl.needRefreshAppStatus(origApp, ctrl.statusRefreshTimeout)
	needRefresh, refreshType, comparisonLevel := ctrl.needRefreshAppStatus(origApp, ctrl.statusRefreshTimeout)

	if !needRefresh {
		return
@@ -576,20 +644,20 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
	defer func() {
		reconcileDuration := time.Since(startTime)
		ctrl.metricsServer.IncReconcile(origApp, reconcileDuration)
		logCtx := log.WithFields(log.Fields{"application": origApp.Name, "time_ms": reconcileDuration.Seconds() * 1e3, "full": fullRefresh})
		logCtx := log.WithFields(log.Fields{"application": origApp.Name, "time_ms": reconcileDuration.Seconds() * 1e3, "level": comparisonLevel})
		logCtx.Info("Reconciliation completed")
	}()

	app := origApp.DeepCopy()
	logCtx := log.WithFields(log.Fields{"application": app.Name})
	if !fullRefresh {
	if comparisonLevel == ComparisonWithNothing {
		if managedResources, err := ctrl.cache.GetAppManagedResources(app.Name); err != nil {
			logCtx.Warnf("Failed to get cached managed resources for tree reconciliation, falling back to full reconciliation")
		} else {
			if tree, err := ctrl.getResourceTree(app, managedResources); err != nil {
				app.Status.Conditions = []appv1.ApplicationCondition{{Type: appv1.ApplicationConditionComparisonError, Message: err.Error()}}
			} else {
				app.Status.ExternalURLs = tree.GetBrowableURLs()
				app.Status.Summary = tree.GetSummary()
				if err = ctrl.cache.SetAppResourcesTree(app.Name, tree); err != nil {
					logCtx.Errorf("Failed to cache resources tree: %v", err)
					return
@@ -610,7 +678,16 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
		return
	}

	compareResult, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, refreshType == appv1.RefreshTypeHard)
	var localManifests []string
	if opState := app.Status.OperationState; opState != nil {
		localManifests = opState.Operation.Sync.Manifests
	}

	revision := app.Spec.Source.TargetRevision
	if comparisonLevel == CompareWithRecent {
		revision = app.Status.Sync.Revision
	}
	compareResult, err := ctrl.appStateManager.CompareAppState(app, revision, app.Spec.Source, refreshType == appv1.RefreshTypeHard, localManifests)
	if err != nil {
		conditions = append(conditions, appv1.ApplicationCondition{Type: appv1.ApplicationConditionComparisonError, Message: err.Error()})
	} else {
@@ -621,7 +698,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
	if err != nil {
		logCtx.Errorf("Failed to cache app resources: %v", err)
	} else {
		app.Status.ExternalURLs = tree.GetBrowableURLs()
		app.Status.Summary = tree.GetSummary()
	}

	syncErrCond := ctrl.autoSync(app, compareResult.syncStatus)
@@ -644,17 +721,17 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
// Returns true if the application has never been compared, has changed, or the comparison result has expired.
// Additionally returns whether a full refresh was requested or not.
// If a full refresh is requested, then the target and live state should be reconciled; otherwise only the live state tree should be updated.
func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType, bool) {
func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType, CompareWith) {
	logCtx := log.WithFields(log.Fields{"application": app.Name})
	var reason string
	fullRefresh := true
	compareWith := CompareWithLatest
	refreshType := appv1.RefreshTypeNormal
	expired := app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
	if requestedType, ok := app.IsRefreshRequested(); ok {
		refreshType = requestedType
		reason = fmt.Sprintf("%s refresh requested", refreshType)
	} else if requested, full := ctrl.isRefreshRequested(app.Name); requested {
		fullRefresh = full
	} else if requested, level := ctrl.isRefreshRequested(app.Name); requested {
		compareWith = level
		reason = fmt.Sprintf("controller refresh requested")
	} else if app.Status.Sync.Status == appv1.SyncStatusCodeUnknown && expired {
		reason = "comparison status unknown"
@@ -666,10 +743,10 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
		reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
	}
	if reason != "" {
		logCtx.Infof("Refreshing app status (%s)", reason)
		return true, refreshType, fullRefresh
		logCtx.Infof("Refreshing app status (%s), level (%d)", reason, compareWith)
		return true, refreshType, compareWith
	}
	return false, refreshType, fullRefresh
	return false, refreshType, compareWith
}

func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application) ([]appv1.ApplicationCondition, bool) {
@@ -688,7 +765,7 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
			})
		}
	} else {
		specConditions, _, err := argo.GetSpecErrors(context.Background(), &app.Spec, proj, ctrl.repoClientset, ctrl.db)
		specConditions, err := argo.ValidatePermissions(context.Background(), &app.Spec, proj, ctrl.db)
		if err != nil {
			conditions = append(conditions, appv1.ApplicationCondition{
				Type: appv1.ApplicationConditionUnknownError,
@@ -885,7 +962,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
	if oldOK && newOK {
		if toggledAutomatedSync(oldApp, newApp) {
			log.WithField("application", newApp.Name).Info("Enabled automated sync")
			ctrl.requestAppRefresh(newApp.Name, true)
			ctrl.requestAppRefresh(newApp.Name, CompareWithLatest)
		}
	}
	ctrl.appRefreshQueue.Add(key)
@@ -927,6 +1004,8 @@ func (ctrl *ApplicationController) watchSettings(ctx context.Context) {
	ctrl.settingsMgr.Subscribe(updateCh)
	prevAppLabelKey := ctrl.settings.GetAppInstanceLabelKey()
	prevResourceExclusions := ctrl.settings.ResourceExclusions
	prevResourceInclusions := ctrl.settings.ResourceInclusions
	prevConfigManagementPlugins := ctrl.settings.ConfigManagementPlugins
	done := false
	for !done {
		select {
@@ -939,10 +1018,20 @@ func (ctrl *ApplicationController) watchSettings(ctx context.Context) {
				prevAppLabelKey = newAppLabelKey
			}
			if !reflect.DeepEqual(prevResourceExclusions, newSettings.ResourceExclusions) {
				log.Infof("resource exclusions modified")
				log.WithFields(log.Fields{"prevResourceExclusions": prevResourceExclusions, "newResourceExclusions": newSettings.ResourceExclusions}).Info("resource exclusions modified")
				ctrl.stateCache.Invalidate()
				prevResourceExclusions = newSettings.ResourceExclusions
			}
			if !reflect.DeepEqual(prevResourceInclusions, newSettings.ResourceInclusions) {
				log.WithFields(log.Fields{"prevResourceInclusions": prevResourceInclusions, "newResourceInclusions": newSettings.ResourceInclusions}).Info("resource inclusions modified")
				ctrl.stateCache.Invalidate()
				prevResourceInclusions = newSettings.ResourceInclusions
			}
			if !reflect.DeepEqual(prevConfigManagementPlugins, newSettings.ConfigManagementPlugins) {
				log.WithFields(log.Fields{"prevConfigManagementPlugins": prevConfigManagementPlugins, "newConfigManagementPlugins": newSettings.ConfigManagementPlugins}).Info("config management plugins modified")
				ctrl.stateCache.Invalidate()
				prevConfigManagementPlugins = newSettings.ConfigManagementPlugins
			}
		case <-ctx.Done():
			done = true
		}

@@ -9,13 +9,16 @@ import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	corev1 "k8s.io/api/core/v1"
	apierr "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/fake"
	kubetesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"

	"github.com/argoproj/argo-cd/common"
	mockstatecache "github.com/argoproj/argo-cd/controller/cache/mocks"
	argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake"
@@ -74,6 +77,7 @@ func newFakeController(data *fakeData) *ApplicationController {
		&mockRepoClientset,
		utilcache.NewCache(utilcache.NewInMemoryCache(1*time.Hour)),
		time.Minute,
		common.DefaultPortArgoCDMetrics,
	)
	if err != nil {
		panic(err)
@@ -119,6 +123,7 @@ var fakeApp = `
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  uid: "123"
  name: my-app
  namespace: ` + test.FakeArgoCDNamespace + `
spec:
@@ -353,20 +358,26 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
// TestFinalizeAppDeletion verifies application deletion
func TestFinalizeAppDeletion(t *testing.T) {
	app := newFakeApp()
	ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
	app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
	appObj := kube.MustToUnstructured(&app)
	ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}, managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
		kube.GetResourceKey(appObj): appObj,
	}})

	fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
	patched := false
	fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
	defaultReactor := fakeAppCs.ReactionChain[0]
	fakeAppCs.ReactionChain = nil
	fakeAppCs.AddReactor("get", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
		return defaultReactor.React(action)
	})
	fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
		patched = true
		return true, nil, nil
	})
	err := ctrl.finalizeApplicationDeletion(app)
	// TODO: use an interface to fake out the calls to GetResourcesWithLabel and DeleteResourceWithLabel
	// For now just ensure we have an expected error condition
	assert.Error(t, err)     // Change this to assert.Nil when we stub out GetResourcesWithLabel/DeleteResourceWithLabel
	assert.False(t, patched) // Change this to assert.True when we stub out GetResourcesWithLabel/DeleteResourceWithLabel
	assert.NoError(t, err)
	assert.True(t, patched)
}

// TestNormalizeApplication verifies we normalize an application during reconciliation
@@ -442,3 +453,78 @@ func TestNormalizeApplication(t *testing.T) {
		assert.False(t, normalized)
	}
}

func TestHandleAppUpdated(t *testing.T) {
	app := newFakeApp()
	app.Spec.Destination.Namespace = test.FakeArgoCDNamespace
	app.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr
	ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})

	ctrl.handleAppUpdated(app.Name, true, kube.GetObjectRef(kube.MustToUnstructured(app)))
	isRequested, level := ctrl.isRefreshRequested(app.Name)
	assert.False(t, isRequested)
	assert.Equal(t, ComparisonWithNothing, level)

	ctrl.handleAppUpdated(app.Name, true, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: "default"})
	isRequested, level = ctrl.isRefreshRequested(app.Name)
	assert.True(t, isRequested)
	assert.Equal(t, CompareWithRecent, level)
}

func TestSetOperationStateOnDeletedApp(t *testing.T) {
	ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
	fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
	fakeAppCs.ReactionChain = nil
	patched := false
	fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
		patched = true
		return true, nil, apierr.NewNotFound(schema.GroupResource{}, "my-app")
	})
	ctrl.setOperationState(newFakeApp(), &argoappv1.OperationState{Phase: argoappv1.OperationSucceeded})
	assert.True(t, patched)
}

func TestNeedRefreshAppStatus(t *testing.T) {
	ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})

	app := newFakeApp()
	app.Status.ReconciledAt = metav1.Now()
	app.Status.Sync = argoappv1.SyncStatus{
		Status: argoappv1.SyncStatusCodeSynced,
		ComparedTo: argoappv1.ComparedTo{
			Source:      app.Spec.Source,
			Destination: app.Spec.Destination,
		},
	}

	// no need to refresh a just-reconciled application
	needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour)
	assert.False(t, needRefresh)

	// refresh the app using the 'deepest' requested comparison level
	ctrl.requestAppRefresh(app.Name, CompareWithRecent)
	ctrl.requestAppRefresh(app.Name, ComparisonWithNothing)

	needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour)
	assert.True(t, needRefresh)
	assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
	assert.Equal(t, CompareWithRecent, compareWith)

	// refresh an application whose status is not reconciled, using the latest commit
	app.Status.Sync = argoappv1.SyncStatus{Status: argoappv1.SyncStatusCodeUnknown}

	needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour)
	assert.True(t, needRefresh)
	assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
	assert.Equal(t, CompareWithLatest, compareWith)

	// execute a hard refresh if the app has the refresh annotation
	app.Annotations = map[string]string{
		common.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard),
	}
	needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour)
	assert.True(t, needRefresh)
	assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
	assert.Equal(t, CompareWithLatest, compareWith)
}
controller/cache/cache.go (vendored, 61 lines changed)
@@ -5,11 +5,13 @@ import (
	"sync"

	log "github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"

	"github.com/argoproj/argo-cd/controller/metrics"
	appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/util"
	"github.com/argoproj/argo-cd/util/db"
@@ -20,17 +22,17 @@ import (
type LiveStateCache interface {
	IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error)
	// Executes the given callback against the resource specified by the key and all of its children
	IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode)) error
	IterateHierarchy(server string, obj *unstructured.Unstructured, action func(child appv1.ResourceNode)) error
	// Returns the state of live nodes which correspond to the target nodes of the specified application.
	GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error)
	// Starts watching resources of each controlled cluster.
	Run(ctx context.Context)
	// Deletes the specified resource from the cluster.
	Delete(server string, obj *unstructured.Unstructured) error
	// Invalidate invalidates the entire cluster state cache
	Invalidate()
}

type AppUpdatedHandler = func(appName string, isManagedResource bool, ref v1.ObjectReference)

func GetTargetObjKey(a *appv1.Application, un *unstructured.Unstructured, isNamespaced bool) kube.ResourceKey {
	key := kube.GetResourceKey(un)
	if !isNamespaced {
@@ -42,26 +44,35 @@ func GetTargetObjKey(a *appv1.Application, un *unstructured.Unstructured, isName
	return key
}

func NewLiveStateCache(db db.ArgoDB, appInformer cache.SharedIndexInformer, settings *settings.ArgoCDSettings, kubectl kube.Kubectl, onAppUpdated func(appName string, fullRefresh bool)) LiveStateCache {
func NewLiveStateCache(
	db db.ArgoDB,
	appInformer cache.SharedIndexInformer,
	settings *settings.ArgoCDSettings,
	kubectl kube.Kubectl,
	metricsServer *metrics.MetricsServer,
	onAppUpdated AppUpdatedHandler) LiveStateCache {

	return &liveStateCache{
		appInformer: appInformer,
		db: db,
		clusters: make(map[string]*clusterInfo),
		lock: &sync.Mutex{},
		onAppUpdated: onAppUpdated,
		kubectl: kubectl,
		settings: settings,
		appInformer:   appInformer,
		db:            db,
		clusters:      make(map[string]*clusterInfo),
		lock:          &sync.Mutex{},
		onAppUpdated:  onAppUpdated,
		kubectl:       kubectl,
		settings:      settings,
		metricsServer: metricsServer,
	}
}

type liveStateCache struct {
	db db.ArgoDB
	clusters map[string]*clusterInfo
	lock *sync.Mutex
	appInformer cache.SharedIndexInformer
	onAppUpdated func(appName string, fullRefresh bool)
	kubectl kube.Kubectl
	settings *settings.ArgoCDSettings
	db            db.ArgoDB
	clusters      map[string]*clusterInfo
	lock          *sync.Mutex
	appInformer   cache.SharedIndexInformer
	onAppUpdated  AppUpdatedHandler
	kubectl       kube.Kubectl
	settings      *settings.ArgoCDSettings
	metricsServer *metrics.MetricsServer
}

func (c *liveStateCache) getCluster(server string) (*clusterInfo, error) {
@@ -116,14 +127,6 @@ func (c *liveStateCache) Invalidate() {
	log.Info("live state cache invalidated")
}

func (c *liveStateCache) Delete(server string, obj *unstructured.Unstructured) error {
	clusterInfo, err := c.getSyncedCluster(server)
	if err != nil {
		return err
	}
	return clusterInfo.delete(obj)
}

func (c *liveStateCache) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
	clusterInfo, err := c.getSyncedCluster(server)
	if err != nil {
@@ -132,12 +135,12 @@ func (c *liveStateCache) IsNamespaced(server string, obj *unstructured.Unstructu
	return clusterInfo.isNamespaced(obj), nil
}

func (c *liveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode)) error {
func (c *liveStateCache) IterateHierarchy(server string, obj *unstructured.Unstructured, action func(child appv1.ResourceNode)) error {
	clusterInfo, err := c.getSyncedCluster(server)
	if err != nil {
		return err
	}
	clusterInfo.iterateHierarchy(key, action)
	clusterInfo.iterateHierarchy(obj, action)
	return nil
}

@@ -146,7 +149,7 @@ func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*
	if err != nil {
		return nil, err
	}
	return clusterInfo.getManagedLiveObjs(a, targetObjs)
	return clusterInfo.getManagedLiveObjs(a, targetObjs, c.metricsServer)
}

func isClusterHasApps(apps []interface{}, cluster *appv1.Cluster) bool {

controller/cache/cluster.go (vendored, 76 lines changed)
@@ -4,11 +4,16 @@ import (
	"context"
	"fmt"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/types"

	"github.com/argoproj/argo-cd/controller/metrics"

	log "github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -44,7 +49,7 @@ type clusterInfo struct {
	nodes map[kube.ResourceKey]*node
	nsIndex map[string]map[kube.ResourceKey]*node

	onAppUpdated func(appName string, fullRefresh bool)
	onAppUpdated AppUpdatedHandler
	kubectl kube.Kubectl
	cluster *appv1.Cluster
	log *log.Entry
@@ -93,13 +98,8 @@ func (c *clusterInfo) createObjInfo(un *unstructured.Unstructured, appInstanceLa
	}
	nodeInfo := &node{
		resourceVersion: un.GetResourceVersion(),
		ref: v1.ObjectReference{
			APIVersion: un.GetAPIVersion(),
			Kind: un.GetKind(),
			Name: un.GetName(),
			Namespace: un.GetNamespace(),
		},
		ownerRefs: ownerRefs,
		ref:       kube.GetObjectRef(un),
		ownerRefs: ownerRefs,
	}
	populateNodeInfo(un, nodeInfo)
	appName := kube.GetAppInstanceLabel(un, appInstanceLabel)
@@ -329,18 +329,36 @@ func (c *clusterInfo) ensureSynced() error {
	return c.syncError
}

func (c *clusterInfo) iterateHierarchy(key kube.ResourceKey, action func(child appv1.ResourceNode)) {
func (c *clusterInfo) iterateHierarchy(obj *unstructured.Unstructured, action func(child appv1.ResourceNode)) {
	c.lock.Lock()
	defer c.lock.Unlock()
	key := kube.GetResourceKey(obj)
	if objInfo, ok := c.nodes[key]; ok {
		action(objInfo.asResourceNode())
		nsNodes := c.nsIndex[key.Namespace]
		childrenByUID := make(map[types.UID][]*node)
		for _, child := range nsNodes {
			if objInfo.isParentOf(child) {
				childrenByUID[child.ref.UID] = append(childrenByUID[child.ref.UID], child)
			}
		}
		// make sure children have no duplicates
		for _, children := range childrenByUID {
			if len(children) > 0 {
				// The object might have multiple children with the same UID (e.g. a ReplicaSet from the apps and extensions groups). It is ok to pick any object, but we need to make sure
				// we pick the same child after every refresh.
				sort.Slice(children, func(i, j int) bool {
					key1 := children[i].resourceKey()
					key2 := children[j].resourceKey()
					return strings.Compare(key1.String(), key2.String()) < 0
				})
				child := children[0]
				action(child.asResourceNode())
				child.iterateChildren(nsNodes, map[kube.ResourceKey]bool{objInfo.resourceKey(): true}, action)
			}
		}
	} else {
		action(c.createObjInfo(obj, c.settings.GetAppInstanceLabelKey()).asResourceNode())
	}
}

@@ -351,7 +369,7 @@ func (c *clusterInfo) isNamespaced(obj *unstructured.Unstructured) bool {
	return true
}

func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured, metricsServer *metrics.MetricsServer) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

@@ -362,6 +380,7 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
			managedObjs[key] = o.resource
		}
	}
	config := metrics.AddMetricsTransportWrapper(metricsServer, a, c.cluster.RESTConfig())
	// iterate target objects and identify ones that already exist in the cluster,
	// but are simply missing our label
	lock := &sync.Mutex{}
@@ -378,7 +397,7 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
				managedObj = existingObj.resource
			} else {
				var err error
				managedObj, err = c.kubectl.GetResource(c.cluster.RESTConfig(), targetObj.GroupVersionKind(), existingObj.ref.Name, existingObj.ref.Namespace)
				managedObj, err = c.kubectl.GetResource(config, targetObj.GroupVersionKind(), existingObj.ref.Name, existingObj.ref.Namespace)
				if err != nil {
					if errors.IsNotFound(err) {
						return nil
@@ -386,13 +405,32 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
					return err
				}
			}
		} else if _, watched := c.apisMeta[key.GroupKind()]; !watched {
			var err error
			managedObj, err = c.kubectl.GetResource(config, targetObj.GroupVersionKind(), targetObj.GetName(), targetObj.GetNamespace())
			if err != nil {
				if errors.IsNotFound(err) {
					return nil
				}
				return err
			}
		}
	}

	if managedObj != nil {
		managedObj, err := c.kubectl.ConvertToVersion(managedObj, targetObj.GroupVersionKind().Group, targetObj.GroupVersionKind().Version)
		converted, err := c.kubectl.ConvertToVersion(managedObj, targetObj.GroupVersionKind().Group, targetObj.GroupVersionKind().Version)
		if err != nil {
			return err
			// fall back to loading the resource from kubernetes if conversion fails
			log.Warnf("Failed to convert resource: %v", err)
			managedObj, err = c.kubectl.GetResource(config, targetObj.GroupVersionKind(), managedObj.GetName(), managedObj.GetNamespace())
			if err != nil {
				if errors.IsNotFound(err) {
					return nil
				}
				return err
			}
		} else {
			managedObj = converted
		}
		lock.Lock()
		managedObjs[key] = managedObj
@@ -407,10 +445,6 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
	return managedObjs, nil
}

func (c *clusterInfo) delete(obj *unstructured.Unstructured) error {
	return c.kubectl.DeleteResource(c.cluster.RESTConfig(), obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), false)
}

func (c *clusterInfo) processEvent(event watch.EventType, un *unstructured.Unstructured) error {
	c.lock.Lock()
	defer c.lock.Unlock()
@@ -446,8 +480,8 @@ func (c *clusterInfo) onNodeUpdated(exists bool, existingNode *node, un *unstruc
			toNotify[app] = n.isRootAppNode() || toNotify[app]
		}
	}
	for name, full := range toNotify {
		c.onAppUpdated(name, full)
	for name, isRootAppNode := range toNotify {
		c.onAppUpdated(name, isRootAppNode, newObj.ref)
	}
}

@@ -459,7 +493,7 @@ func (c *clusterInfo) onNodeRemoved(key kube.ResourceKey, n *node) {

	c.removeNode(key)
	if appName != "" {
		c.onAppUpdated(appName, n.isRootAppNode())
		c.onAppUpdated(appName, n.isRootAppNode(), n.ref)
	}
}

controller/cache/cluster_test.go (vendored, 52 lines changed)
@@ -43,24 +43,28 @@ var (
apiVersion: v1
kind: Pod
metadata:
  uid: "1"
  name: helm-guestbook-pod
  namespace: default
  ownerReferences:
  - apiVersion: extensions/v1beta1
  - apiVersion: apps/v1
    kind: ReplicaSet
    name: helm-guestbook-rs
    uid: "2"
  resourceVersion: "123"`)

	testRS = strToUnstructured(`
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  uid: "2"
  name: helm-guestbook-rs
  namespace: default
  ownerReferences:
  - apiVersion: extensions/v1beta1
  - apiVersion: apps/v1beta1
    kind: Deployment
    name: helm-guestbook
    uid: "3"
  resourceVersion: "123"`)

	testDeploy = strToUnstructured(`
@@ -69,6 +73,7 @@ var (
metadata:
  labels:
    app.kubernetes.io/instance: helm-guestbook
  uid: "3"
  name: helm-guestbook
  namespace: default
  resourceVersion: "123"`)
@@ -107,6 +112,10 @@ var (
          serviceName: helm-guestbook
          servicePort: 443
        path: /
      - backend:
          serviceName: helm-guestbook
          servicePort: https
        path: /
status:
  loadBalancer:
    ingress:
@@ -142,7 +151,7 @@ func newClusterExt(kubectl kube.Kubectl) *clusterInfo {
	return &clusterInfo{
		lock: &sync.Mutex{},
		nodes: make(map[kube.ResourceKey]*node),
		onAppUpdated: func(appName string, fullRefresh bool) {},
		onAppUpdated: func(appName string, fullRefresh bool, reference corev1.ObjectReference) {},
		kubectl: kubectl,
		nsIndex: make(map[string]map[kube.ResourceKey]*node),
		cluster: &appv1.Cluster{},
@@ -156,7 +165,7 @@ func newClusterExt(kubectl kube.Kubectl) *clusterInfo {

func getChildren(cluster *clusterInfo, un *unstructured.Unstructured) []appv1.ResourceNode {
	hierarchy := make([]appv1.ResourceNode, 0)
	cluster.iterateHierarchy(kube.GetResourceKey(un), func(child appv1.ResourceNode) {
	cluster.iterateHierarchy(un, func(child appv1.ResourceNode) {
		hierarchy = append(hierarchy, child)
	})
	return hierarchy[1:]
@@ -175,6 +184,7 @@ func TestGetChildren(t *testing.T) {
		Name: "helm-guestbook-pod",
		Group: "",
		Version: "v1",
		UID: "1",
	},
	ParentRefs: []appv1.ResourceRef{{
		Group: "apps",
@@ -182,6 +192,7 @@ func TestGetChildren(t *testing.T) {
		Kind: "ReplicaSet",
		Namespace: "default",
		Name: "helm-guestbook-rs",
		UID: "2",
	}},
	Health: &appv1.HealthStatus{Status: appv1.HealthStatusUnknown},
	NetworkingInfo: &appv1.ResourceNetworkingInfo{Labels: testPod.GetLabels()},
@@ -197,11 +208,12 @@ func TestGetChildren(t *testing.T) {
		Name: "helm-guestbook-rs",
		Group: "apps",
		Version: "v1",
		UID: "2",
	},
	ResourceVersion: "123",
	Health: &appv1.HealthStatus{Status: appv1.HealthStatusHealthy},
	Info: []appv1.InfoItem{},
	ParentRefs: []appv1.ResourceRef{{Group: "apps", Version: "", Kind: "Deployment", Namespace: "default", Name: "helm-guestbook"}},
	ParentRefs: []appv1.ResourceRef{{Group: "apps", Version: "", Kind: "Deployment", Namespace: "default", Name: "helm-guestbook", UID: "3"}},
	}}, rsChildren...), deployChildren)
}

@@ -225,7 +237,7 @@ metadata:
			Namespace: "default",
		},
	},
}, []*unstructured.Unstructured{targetDeploy})
}, []*unstructured.Unstructured{targetDeploy}, nil)
assert.Nil(t, err)
assert.Equal(t, managedObjs, map[kube.ResourceKey]*unstructured.Unstructured{
	kube.NewResourceKey("apps", "Deployment", "default", "helm-guestbook"): testDeploy,
@@ -253,12 +265,14 @@ func TestProcessNewChildEvent(t *testing.T) {
apiVersion: v1
kind: Pod
metadata:
  uid: "4"
  name: helm-guestbook-pod2
  namespace: default
  ownerReferences:
  - apiVersion: extensions/v1beta1
  - apiVersion: apps/v1
    kind: ReplicaSet
    name: helm-guestbook-rs
    uid: "2"
  resourceVersion: "123"`)

	err = cluster.processEvent(watch.Added, newPod)
@@ -275,6 +289,7 @@ func TestProcessNewChildEvent(t *testing.T) {
		Name: "helm-guestbook-pod",
		Group: "",
		Version: "v1",
		UID: "1",
	},
	Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
	Health: &appv1.HealthStatus{Status: appv1.HealthStatusUnknown},
@@ -285,6 +300,7 @@ func TestProcessNewChildEvent(t *testing.T) {
		Kind: "ReplicaSet",
		Namespace: "default",
		Name: "helm-guestbook-rs",
		UID: "2",
	}},
	ResourceVersion: "123",
}, {
@@ -294,6 +310,7 @@ func TestProcessNewChildEvent(t *testing.T) {
		Name: "helm-guestbook-pod2",
		Group: "",
		Version: "v1",
		UID: "4",
	},
	NetworkingInfo: &appv1.ResourceNetworkingInfo{Labels: testPod.GetLabels()},
	Info: []appv1.InfoItem{{Name: "Containers", Value: "0/0"}},
@@ -304,6 +321,7 @@ func TestProcessNewChildEvent(t *testing.T) {
		Kind: "ReplicaSet",
		Namespace: "default",
		Name: "helm-guestbook-rs",
		UID: "2",
	}},
	ResourceVersion: "123",
}}, rsChildren)
@@ -351,7 +369,7 @@ func TestUpdateResourceTags(t *testing.T) {
func TestUpdateAppResource(t *testing.T) {
	updatesReceived := make([]string, 0)
	cluster := newCluster(testPod, testRS, testDeploy)
	cluster.onAppUpdated = func(appName string, fullRefresh bool) {
	cluster.onAppUpdated = func(appName string, fullRefresh bool, _ corev1.ObjectReference) {
		updatesReceived = append(updatesReceived, fmt.Sprintf("%s: %v", appName, fullRefresh))
	}

@@ -415,3 +433,21 @@ func TestWatchCacheUpdated(t *testing.T) {
	_, ok = cluster.nodes[kube.GetResourceKey(added)]
	assert.True(t, ok)
}

func TestGetDuplicatedChildren(t *testing.T) {
	extensionsRS := testRS.DeepCopy()
	extensionsRS.SetGroupVersionKind(schema.GroupVersionKind{Group: "extensions", Kind: kube.ReplicaSetKind, Version: "v1beta1"})
	cluster := newCluster(testDeploy, testRS, extensionsRS)
	err := cluster.ensureSynced()

	assert.Nil(t, err)

	// Get children multiple times to make sure the right child is picked up every time.
	for i := 0; i < 5; i++ {
		children := getChildren(cluster, testDeploy)
		assert.Len(t, children, 1)
		assert.Equal(t, "apps", children[0].Group)
		assert.Equal(t, kube.ReplicaSetKind, children[0].Kind)
		assert.Equal(t, testRS.GetName(), children[0].Name)
	}
}

controller/cache/info.go (vendored, 63 lines changed)
@@ -9,6 +9,7 @@ import (
	k8snode "k8s.io/kubernetes/pkg/util/node"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/util"
	"github.com/argoproj/argo-cd/util/kube"
)

@@ -63,14 +64,15 @@ func populateServiceInfo(un *unstructured.Unstructured, node *node) {
}

func populateIngressInfo(un *unstructured.Unstructured, node *node) {
	targets := make([]v1alpha1.ResourceRef, 0)
	ingress := getIngress(un)
	targetsMap := make(map[v1alpha1.ResourceRef]bool)
	if backend, ok, err := unstructured.NestedMap(un.Object, "spec", "backend"); ok && err == nil {
		targets = append(targets, v1alpha1.ResourceRef{
		targetsMap[v1alpha1.ResourceRef{
			Group: "",
			Kind: kube.ServiceKind,
			Namespace: un.GetNamespace(),
			Name: fmt.Sprintf("%s", backend["serviceName"]),
		})
		}] = true
	}
	urlsSet := make(map[string]bool)
	if rules, ok, err := unstructured.NestedSlice(un.Object, "spec", "rules"); ok && err == nil {
@@ -80,6 +82,14 @@ func populateIngressInfo(un *unstructured.Unstructured, node *node) {
			continue
		}
		host := rule["host"]
		if host == nil || host == "" {
			for i := range ingress {
				host = util.FirstNonEmpty(ingress[i].Hostname, ingress[i].IP)
				if host != "" {
					break
				}
			}
		}
		paths, ok, err := unstructured.NestedSlice(rule, "http", "paths")
		if !ok || err != nil {
			continue
@@ -91,32 +101,48 @@ func populateIngressInfo(un *unstructured.Unstructured, node *node) {
		}

		if serviceName, ok, err := unstructured.NestedString(path, "backend", "serviceName"); ok && err == nil {
			targets = append(targets, v1alpha1.ResourceRef{
			targetsMap[v1alpha1.ResourceRef{
				Group: "",
				Kind: kube.ServiceKind,
				Namespace: un.GetNamespace(),
				Name: serviceName,
			})
			}] = true
		}

		if port, ok, err := unstructured.NestedFieldNoCopy(path, "backend", "servicePort"); ok && err == nil && host != "" {
			switch fmt.Sprintf("%v", port) {
			case "80":
		if port, ok, err := unstructured.NestedFieldNoCopy(path, "backend", "servicePort"); ok && err == nil && host != nil && host != "" {
			stringPort := ""
			switch typedPort := port.(type) {
			case int64:
				stringPort = fmt.Sprintf("%d", typedPort)
			case float64:
				stringPort = fmt.Sprintf("%d", int64(typedPort))
			case string:
				stringPort = typedPort
			default:
				stringPort = fmt.Sprintf("%v", port)
			}

			switch stringPort {
			case "80", "http":
				urlsSet[fmt.Sprintf("http://%s", host)] = true
			case "443":
			case "443", "https":
				urlsSet[fmt.Sprintf("https://%s", host)] = true
			default:
				urlsSet[fmt.Sprintf("http://%s:%s", host, port)] = true
				urlsSet[fmt.Sprintf("http://%s:%s", host, stringPort)] = true
			}
		}
	}
	}
	}
	targets := make([]v1alpha1.ResourceRef, 0)
	for target := range targetsMap {
		targets = append(targets, target)
	}
	urls := make([]string, 0)
	for url := range urlsSet {
		urls = append(urls, url)
	}
	node.networkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, Ingress: getIngress(un), ExternalURLs: urls}
	node.networkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, Ingress: ingress, ExternalURLs: urls}
}
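
A side note on the stringPort switch above: values read with unstructured.NestedFieldNoCopy come straight from decoded JSON/YAML, so a numeric servicePort typically surfaces as int64 via the Kubernetes codecs, as float64 after a plain encoding/json round trip, and as a string for named ports such as "https". A small self-contained illustration (the object literal is hypothetical, not taken from this diff):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
    // Hypothetical backend fragment; servicePort could equally be float64(443)
    // or the string "https" depending on how the object was decoded.
    obj := map[string]interface{}{
        "backend": map[string]interface{}{"servicePort": int64(443)},
    }
    port, found, err := unstructured.NestedFieldNoCopy(obj, "backend", "servicePort")
    if found && err == nil {
        fmt.Printf("%T %v\n", port, port) // prints: int64 443
    }
}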

func populatePodInfo(un *unstructured.Unstructured, node *node) {
@@ -135,13 +161,20 @@ func populatePodInfo(un *unstructured.Unstructured, node *node) {
		reason = pod.Status.Reason
	}

	initializing := false

	// note that I ignore initContainers
	imagesSet := make(map[string]bool)
	for _, container := range pod.Spec.InitContainers {
		imagesSet[container.Image] = true
	}
	for _, container := range pod.Spec.Containers {
		node.images = append(node.images, container.Image)
		imagesSet[container.Image] = true
	}

	node.images = nil
	for image := range imagesSet {
		node.images = append(node.images, image)
	}

	initializing := false
	for i := range pod.Status.InitContainerStatuses {
		container := pod.Status.InitContainerStatuses[i]
		restarts += int(container.RestartCount)

controller/cache/info_test.go (vendored, 40 lines changed)
@@ -1,6 +1,8 @@
package cache

import (
	"sort"
	"strings"
	"testing"

	v1 "k8s.io/api/core/v1"
@@ -50,6 +52,9 @@ func TestGetIngressInfo(t *testing.T) {
	node := &node{}
	populateNodeInfo(testIngress, node)
	assert.Equal(t, 0, len(node.info))
	sort.Slice(node.networkingInfo.TargetRefs, func(i, j int) bool {
		return strings.Compare(node.networkingInfo.TargetRefs[j].Name, node.networkingInfo.TargetRefs[i].Name) < 0
	})
	assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
		Ingress: []v1.LoadBalancerIngress{{IP: "107.178.210.11"}},
		TargetRefs: []v1alpha1.ResourceRef{{
@@ -66,3 +71,38 @@ func TestGetIngressInfo(t *testing.T) {
		ExternalURLs: []string{"https://helm-guestbook.com"},
	}, node.networkingInfo)
}

func TestGetIngressInfoNoHost(t *testing.T) {
	ingress := strToUnstructured(`
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: helm-guestbook
  namespace: default
spec:
  rules:
  - http:
      paths:
      - backend:
          serviceName: helm-guestbook
          servicePort: 443
        path: /
status:
  loadBalancer:
    ingress:
    - ip: 107.178.210.11`)

	node := &node{}
	populateNodeInfo(ingress, node)

	assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
		Ingress: []v1.LoadBalancerIngress{{IP: "107.178.210.11"}},
		TargetRefs: []v1alpha1.ResourceRef{{
			Namespace: "default",
			Group: "",
			Kind: kube.ServiceKind,
			Name: "helm-guestbook",
		}},
		ExternalURLs: []string{"https://107.178.210.11"},
	}, node.networkingInfo)
}

controller/cache/mocks/LiveStateCache.go (vendored, 24 lines changed)
@@ -13,20 +13,6 @@ type LiveStateCache struct {
	mock.Mock
}

// Delete provides a mock function with given fields: server, obj
func (_m *LiveStateCache) Delete(server string, obj *unstructured.Unstructured) error {
	ret := _m.Called(server, obj)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, *unstructured.Unstructured) error); ok {
		r0 = rf(server, obj)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// GetManagedLiveObjs provides a mock function with given fields: a, targetObjs
func (_m *LiveStateCache) GetManagedLiveObjs(a *v1alpha1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
	ret := _m.Called(a, targetObjs)
@@ -76,13 +62,13 @@ func (_m *LiveStateCache) IsNamespaced(server string, obj *unstructured.Unstruct
	return r0, r1
}

// IterateHierarchy provides a mock function with given fields: server, key, action
func (_m *LiveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(v1alpha1.ResourceNode)) error {
	ret := _m.Called(server, key, action)
// IterateHierarchy provides a mock function with given fields: server, obj, action
func (_m *LiveStateCache) IterateHierarchy(server string, obj *unstructured.Unstructured, action func(v1alpha1.ResourceNode)) error {
	ret := _m.Called(server, obj, action)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, kube.ResourceKey, func(v1alpha1.ResourceNode)) error); ok {
		r0 = rf(server, key, action)
	if rf, ok := ret.Get(0).(func(string, *unstructured.Unstructured, func(v1alpha1.ResourceNode)) error); ok {
		r0 = rf(server, obj, action)
	} else {
		r0 = ret.Error(0)
	}

controller/cache/node.go (vendored, 6 lines changed)
@@ -36,8 +36,7 @@ func (n *node) resourceKey() kube.ResourceKey {

func (n *node) isParentOf(child *node) bool {
	for _, ownerRef := range child.ownerRefs {
		ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind)
		if kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name) == n.resourceKey() {
		if n.ref.UID == ownerRef.UID {
			return true
		}
	}
@@ -100,10 +99,11 @@ func (n *node) asResourceNode() appv1.ResourceNode {
	for _, ownerRef := range n.ownerRefs {
		ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind)
		ownerKey := kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, n.ref.Namespace, ownerRef.Name)
		parentRefs[0] = appv1.ResourceRef{Name: ownerRef.Name, Kind: ownerKey.Kind, Namespace: n.ref.Namespace, Group: ownerKey.Group}
		parentRefs[0] = appv1.ResourceRef{Name: ownerRef.Name, Kind: ownerKey.Kind, Namespace: n.ref.Namespace, Group: ownerKey.Group, UID: string(ownerRef.UID)}
	}
	return appv1.ResourceNode{
		ResourceRef: appv1.ResourceRef{
			UID: string(n.ref.UID),
			Name: n.ref.Name,
			Group: gv.Group,
			Version: gv.Version,

controller/cache/node_test.go (vendored, 3 lines changed)
@@ -19,9 +19,10 @@ func TestIsParentOf(t *testing.T) {
	assert.False(t, grandParent.isParentOf(child))
}

func TestIsParentOfSameKindDifferentGroup(t *testing.T) {
func TestIsParentOfSameKindDifferentGroupAndUID(t *testing.T) {
	rs := testRS.DeepCopy()
	rs.SetAPIVersion("somecrd.io/v1")
	rs.SetUID("123")
	child := c.createObjInfo(testPod, "")
	invalidParent := c.createObjInfo(rs, "")

@@ -2,6 +2,7 @@ package metrics

import (
	"net/http"
	"strconv"
	"time"

	"github.com/prometheus/client_golang/prometheus"
@@ -12,11 +13,13 @@ import (
	argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	applister "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
	"github.com/argoproj/argo-cd/util/git"
	"github.com/argoproj/argo-cd/util/healthz"
)

type MetricsServer struct {
	*http.Server
	syncCounter *prometheus.CounterVec
	k8sRequestCounter *prometheus.CounterVec
	reconcileHistogram *prometheus.HistogramVec
}

@@ -57,12 +60,13 @@ var (
)

// NewMetricsServer returns a new prometheus server which collects application metrics
func NewMetricsServer(addr string, appLister applister.ApplicationLister) *MetricsServer {
func NewMetricsServer(addr string, appLister applister.ApplicationLister, healthCheck func() error) *MetricsServer {
	mux := http.NewServeMux()
	appRegistry := NewAppRegistry(appLister)
	appRegistry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
	appRegistry.MustRegister(prometheus.NewGoCollector())
	mux.Handle(MetricsPath, promhttp.HandlerFor(appRegistry, promhttp.HandlerOpts{}))
	healthz.ServeHealthCheck(mux, healthCheck)

	syncCounter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
@@ -72,6 +76,14 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister) *Metri
		append(descAppDefaultLabels, "phase"),
	)
	appRegistry.MustRegister(syncCounter)
	k8sRequestCounter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "argocd_app_k8s_request_total",
			Help: "Number of kubernetes requests executed during application reconciliation.",
		},
		append(descAppDefaultLabels, "response_code"),
	)
	appRegistry.MustRegister(k8sRequestCounter)

	reconcileHistogram := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
@@ -91,6 +103,7 @@ func NewMetricsServer(addr string, appLister applister.ApplicationLister) *Metri
			Handler: mux,
		},
		syncCounter: syncCounter,
		k8sRequestCounter: k8sRequestCounter,
		reconcileHistogram: reconcileHistogram,
	}
}
@@ -103,6 +116,11 @@ func (m *MetricsServer) IncSync(app *argoappv1.Application, state *argoappv1.Ope
	m.syncCounter.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), string(state.Phase)).Inc()
}

// IncKubernetesRequest increments the kubernetes requests counter for an application
func (m *MetricsServer) IncKubernetesRequest(app *argoappv1.Application, statusCode int) {
	m.k8sRequestCounter.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject(), strconv.Itoa(statusCode)).Inc()
}

// IncReconcile increments the reconcile counter for an application
func (m *MetricsServer) IncReconcile(app *argoappv1.Application, duration time.Duration) {
	m.reconcileHistogram.WithLabelValues(app.Namespace, app.Name, app.Spec.GetProject()).Observe(duration.Seconds())

@@ -104,6 +104,10 @@ argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_s
argocd_app_sync_status{name="my-app",namespace="argocd",project="default",sync_status="Unknown"} 0
`

var noOpHealthCheck = func() error {
	return nil
}

func newFakeApp(fakeApp string) *argoappv1.Application {
	var app argoappv1.Application
	err := yaml.Unmarshal([]byte(fakeApp), &app)
@@ -133,7 +137,7 @@ func newFakeLister(fakeApp ...string) (context.CancelFunc, applister.Application
func testApp(t *testing.T, fakeApp string, expectedResponse string) {
	cancel, appLister := newFakeLister(fakeApp)
	defer cancel()
	metricsServ := NewMetricsServer("localhost:8082", appLister)
	metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)
	req, err := http.NewRequest("GET", "/metrics", nil)
	assert.NoError(t, err)
	rr := httptest.NewRecorder()
@@ -176,7 +180,7 @@ argocd_app_sync_total{name="my-app",namespace="argocd",phase="Succeeded",project
func TestMetricsSyncCounter(t *testing.T) {
	cancel, appLister := newFakeLister()
	defer cancel()
	metricsServ := NewMetricsServer("localhost:8082", appLister)
	metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)

	fakeApp := newFakeApp(fakeApp)
	metricsServ.IncSync(fakeApp, &argoappv1.OperationState{Phase: argoappv1.OperationRunning})
@@ -217,7 +221,7 @@ argocd_app_reconcile_count{name="my-app",namespace="argocd",project="important-p
func TestReconcileMetrics(t *testing.T) {
	cancel, appLister := newFakeLister()
	defer cancel()
	metricsServ := NewMetricsServer("localhost:8082", appLister)
	metricsServ := NewMetricsServer("localhost:8082", appLister, noOpHealthCheck)

	fakeApp := newFakeApp(fakeApp)
	metricsServ.IncReconcile(fakeApp, 5*time.Second)

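
With the counter registered, the new series shows up on the /metrics endpoint in the usual Prometheus text format; a hypothetical sample with illustrative label values (the label set is descAppDefaultLabels plus response_code, per the registration above):

argocd_app_k8s_request_total{name="my-app",namespace="argocd",project="default",response_code="200"} 7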
controller/metrics/transportwrapper.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package metrics

import (
	"net/http"

	"k8s.io/client-go/rest"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)

type metricsRoundTripper struct {
	roundTripper http.RoundTripper
	app *v1alpha1.Application
	metricsServer *MetricsServer
}

func (mrt *metricsRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	resp, err := mrt.roundTripper.RoundTrip(r)
	statusCode := 0
	if resp != nil {
		statusCode = resp.StatusCode
	}
	mrt.metricsServer.IncKubernetesRequest(mrt.app, statusCode)
	return resp, err
}

// AddMetricsTransportWrapper adds a transport wrapper which increments 'argocd_app_k8s_request_total' counter on each kubernetes request
func AddMetricsTransportWrapper(server *MetricsServer, app *v1alpha1.Application, config *rest.Config) *rest.Config {
	wrap := config.WrapTransport
	config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
		if wrap != nil {
			rt = wrap(rt)
		}
		return &metricsRoundTripper{roundTripper: rt, metricsServer: server, app: app}
	}
	return config
}
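
Usage note: callers wrap a cluster's REST config once before building clients, as finalizeApplicationDeletion and getManagedLiveObjs do in the hunks above; every request issued through the wrapped config then increments argocd_app_k8s_request_total for that application. A brief sketch restating that pattern (variable names hypothetical):

// Hypothetical caller: requests made via the wrapped config are counted
// against the application in the argocd_app_k8s_request_total metric.
config := metrics.AddMetricsTransportWrapper(metricsServer, app, cluster.RESTConfig())
err := kubectl.DeleteResource(config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), false)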
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/common"
|
||||
statecache "github.com/argoproj/argo-cd/controller/cache"
|
||||
"github.com/argoproj/argo-cd/controller/metrics"
|
||||
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
|
||||
@@ -26,6 +27,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/util/health"
|
||||
hookutil "github.com/argoproj/argo-cd/util/hook"
|
||||
kubeutil "github.com/argoproj/argo-cd/util/kube"
|
||||
"github.com/argoproj/argo-cd/util/resource"
|
||||
"github.com/argoproj/argo-cd/util/settings"
|
||||
)
|
||||
|
||||
@@ -55,7 +57,7 @@ type ResourceInfoProvider interface {
|
||||
|
||||
// AppStateManager defines methods which allow to compare application spec and actual application state.
|
||||
type AppStateManager interface {
|
||||
CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool) (*comparisonResult, error)
|
||||
CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool, localObjects []string) (*comparisonResult, error)
|
||||
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
|
||||
}
|
||||
|
||||
@@ -73,6 +75,7 @@ type comparisonResult struct {
|
||||
|
||||
// appStateManager allows to compare applications to git
|
||||
type appStateManager struct {
|
||||
metricsServer *metrics.MetricsServer
|
||||
db db.ArgoDB
|
||||
settings *settings.ArgoCDSettings
|
||||
appclientset appclientset.Interface
|
||||
@@ -88,7 +91,10 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
repo := m.getRepo(source.RepoURL)
|
||||
repo, err := m.db.GetRepository(context.Background(), source.RepoURL)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@@ -119,12 +125,21 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
targetObjs, hooks, nil := unmarshalManifests(manifestInfo.Manifests)
|
||||
return targetObjs, hooks, manifestInfo, nil
|
||||
|
||||
}
|
||||
|
||||
func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, []*unstructured.Unstructured, error) {
|
||||
targetObjs := make([]*unstructured.Unstructured, 0)
|
||||
hooks := make([]*unstructured.Unstructured, 0)
|
||||
for _, manifest := range manifestInfo.Manifests {
|
||||
for _, manifest := range manifests {
|
||||
obj, err := v1alpha1.UnmarshalToUnstructured(manifest)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
if resource.Ignore(obj) {
|
||||
continue
|
||||
}
|
||||
if hookutil.IsHook(obj) {
|
||||
hooks = append(hooks, obj)
|
||||
@@ -132,7 +147,7 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
targetObjs = append(targetObjs, obj)
|
||||
}
|
||||
}
|
||||
return targetObjs, hooks, manifestInfo, nil
|
||||
return targetObjs, hooks, nil
|
||||
}
|
||||
|
||||
func DeduplicateTargetObjects(
|
||||
@@ -172,10 +187,46 @@ func DeduplicateTargetObjects(
|
||||
return result, conditions, nil
|
||||
}
|
||||
|
||||
// dedupLiveResources handles removes live resource duplicates with the same UID. Duplicates are created in a separate resource groups.
|
||||
// E.g. apps/Deployment produces duplicate in extensions/Deployment, authorization.openshift.io/ClusterRole produces duplicate in rbac.authorization.k8s.io/ClusterRole etc.
|
||||
// The method removes such duplicates unless it was defined in git ( exists in target resources list ). At least one duplicate stays.
|
||||
// If non of duplicates are in git at random one stays
|
||||
func dedupLiveResources(targetObjs []*unstructured.Unstructured, liveObjsByKey map[kubeutil.ResourceKey]*unstructured.Unstructured) {
|
||||
targetObjByKey := make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
|
||||
for i := range targetObjs {
|
||||
targetObjByKey[kubeutil.GetResourceKey(targetObjs[i])] = targetObjs[i]
|
||||
}
|
||||
liveObjsById := make(map[types.UID][]*unstructured.Unstructured)
|
||||
for k := range liveObjsByKey {
|
||||
obj := liveObjsByKey[k]
|
||||
if obj != nil {
|
||||
liveObjsById[obj.GetUID()] = append(liveObjsById[obj.GetUID()], obj)
|
||||
}
|
||||
}
|
||||
for id := range liveObjsById {
|
||||
objs := liveObjsById[id]
|
||||
|
||||
if len(objs) > 1 {
|
||||
duplicatesLeft := len(objs)
|
||||
for i := range objs {
|
||||
obj := objs[i]
|
||||
resourceKey := kubeutil.GetResourceKey(obj)
|
||||
if _, ok := targetObjByKey[resourceKey]; !ok {
|
||||
delete(liveObjsByKey, resourceKey)
|
||||
duplicatesLeft--
|
||||
if duplicatesLeft == 1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
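A hypothetical illustration of that rule (newUnstructured is a stand-in helper, not part of this change): the cluster reports one Deployment twice, under the apps and extensions groups, and only the variant present in git survives.

	// Hypothetical: appsDeploy is in git (target list); extDeploy is its live duplicate.
	appsDeploy := newUnstructured("apps/v1", "Deployment", "default", "demo")          // stand-in helper
	extDeploy := newUnstructured("extensions/v1beta1", "Deployment", "default", "demo") // stand-in helper
	appsDeploy.SetUID("123")
	extDeploy.SetUID("123") // same UID -> recognized as duplicates

	live := map[kubeutil.ResourceKey]*unstructured.Unstructured{
		kubeutil.GetResourceKey(appsDeploy): appsDeploy,
		kubeutil.GetResourceKey(extDeploy):  extDeploy,
	}
	dedupLiveResources([]*unstructured.Unstructured{appsDeploy}, live)
	// live now holds only the apps/v1 entry; the extensions duplicate was dropped
	// because its key does not appear in the target (git) list.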

// CompareAppState compares application git state to the live app state, using the specified
// revision and supplied source. If revision or overrides are empty, then compares against
// revision and overrides in the app spec.
-func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool) (*comparisonResult, error) {
+func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, noCache bool, localManifests []string) (*comparisonResult, error) {
	diffNormalizer, err := argo.NewDiffNormalizer(app.Spec.IgnoreDifferences, m.settings.ResourceOverrides)
	if err != nil {
		return nil, err
@@ -186,12 +237,28 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
	failedToLoadObjs := false
	conditions := make([]v1alpha1.ApplicationCondition, 0)
	appLabelKey := m.settings.GetAppInstanceLabelKey()
	targetObjs, hooks, manifestInfo, err := m.getRepoObjs(app, source, appLabelKey, revision, noCache)
	if err != nil {
		targetObjs = make([]*unstructured.Unstructured, 0)
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
		failedToLoadObjs = true

	var targetObjs []*unstructured.Unstructured
	var hooks []*unstructured.Unstructured
	var manifestInfo *repository.ManifestResponse

	if len(localManifests) == 0 {
		targetObjs, hooks, manifestInfo, err = m.getRepoObjs(app, source, appLabelKey, revision, noCache)
		if err != nil {
			targetObjs = make([]*unstructured.Unstructured, 0)
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
			failedToLoadObjs = true
		}
	} else {
		targetObjs, hooks, err = unmarshalManifests(localManifests)
		if err != nil {
			targetObjs = make([]*unstructured.Unstructured, 0)
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
			failedToLoadObjs = true
		}
		manifestInfo = nil
	}

	targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Server, app.Spec.Destination.Namespace, targetObjs, m.liveStateCache)
	if err != nil {
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
@@ -200,6 +267,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st

	logCtx.Debugf("Generated config manifests")
	liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs)
+	dedupLiveResources(targetObjs, liveObjByKey)
	if err != nil {
		liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
@@ -251,10 +319,11 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
	syncCode := v1alpha1.SyncStatusCodeSynced
	managedResources := make([]managedResource, len(targetObjs))
	resourceSummaries := make([]v1alpha1.ResourceStatus, len(targetObjs))
-	for i := 0; i < len(targetObjs); i++ {
-		obj := managedLiveObj[i]
+	for i, targetObj := range targetObjs {
+		liveObj := managedLiveObj[i]
+		obj := liveObj
		if obj == nil {
-			obj = targetObjs[i]
+			obj = targetObj
		}
		if obj == nil {
			continue
@@ -271,15 +340,19 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
		}

		diffResult := diffResults.Diffs[i]
-		if resState.Hook {
+		if resState.Hook || resource.Ignore(obj) {
			// For resource hooks, don't store sync status, and do not affect overall sync status
-		} else if diffResult.Modified || targetObjs[i] == nil || managedLiveObj[i] == nil {
+		} else if diffResult.Modified || targetObj == nil || liveObj == nil {
			// Set resource state to OutOfSync since one of the following is true:
			// * target and live resource are different
			// * target resource not defined and live resource is extra
			// * target resource present but live resource is missing
			resState.Status = v1alpha1.SyncStatusCodeOutOfSync
-			syncCode = v1alpha1.SyncStatusCodeOutOfSync
+			// we ignore the status if the obj needs pruning AND we have the annotation
+			needsPruning := targetObj == nil && liveObj != nil
+			if !(needsPruning && resource.HasAnnotationOption(obj, common.AnnotationCompareOptions, "IgnoreExtraneous")) {
+				syncCode = v1alpha1.SyncStatusCodeOutOfSync
+			}
		} else {
			resState.Status = v1alpha1.SyncStatusCodeSynced
		}
@@ -289,8 +362,8 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
			Group:   resState.Group,
			Kind:    resState.Kind,
			Version: resState.Version,
-			Live:    managedLiveObj[i],
-			Target:  targetObjs[i],
+			Live:    liveObj,
+			Target:  targetObj,
			Diff:    diffResult,
			Hook:    resState.Hook,
		}
@@ -311,7 +384,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
		syncStatus.Revision = manifestInfo.Revision
	}

-	healthStatus, err := health.SetApplicationHealth(resourceSummaries, GetLiveObjs(managedResources), m.settings.ResourceOverrides)
+	healthStatus, err := health.SetApplicationHealth(resourceSummaries, GetLiveObjs(managedResources), m.settings.ResourceOverrides, func(obj *unstructured.Unstructured) bool {
+		return !isSelfReferencedApp(app, kubeutil.GetObjectRef(obj))
+	})

	if err != nil {
		conditions = append(conditions, appv1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error()})
	}
@@ -332,15 +408,6 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
	return &compRes, nil
}

func (m *appStateManager) getRepo(repoURL string) *v1alpha1.Repository {
	repo, err := m.db.GetRepository(context.Background(), repoURL)
	if err != nil {
		// If we couldn't retrieve from the repo service, assume public repositories
		repo = &v1alpha1.Repository{Repo: repoURL}
	}
	return repo
}

func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource) error {
	var nextID int64
	if len(app.Status.History) > 0 {
@@ -379,6 +446,7 @@ func NewAppStateManager(
	settings *settings.ArgoCDSettings,
	liveStateCache statecache.LiveStateCache,
	projInformer cache.SharedIndexInformer,
+	metricsServer *metrics.MetricsServer,
) AppStateManager {
	return &appStateManager{
		liveStateCache: liveStateCache,
@@ -389,5 +457,6 @@ func NewAppStateManager(
		namespace:      namespace,
		settings:       settings,
		projInformer:   projInformer,
+		metricsServer:  metricsServer,
	}
}
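The practical effect of the new localManifests parameter: callers can compare an application against manifests supplied directly, for example generated on a developer machine, instead of asking the repo server for the configured revision. A hedged sketch of the two call modes, written as if from inside the controller package (the manifest JSON strings and the stateManager variable are placeholders):

	// Old behavior, preserved by passing nil: manifests come from the repo server.
	compRes, err := stateManager.CompareAppState(app, "", app.Spec.Source, false, nil)

	// New behavior: locally supplied manifests bypass the repo server and are fed
	// through unmarshalManifests before the same dedup/diff/health machinery runs.
	local := []string{deploymentJSON, serviceJSON} // hypothetical manifest strings
	compResLocal, err := stateManager.CompareAppState(app, "", app.Spec.Source, false, local)

Note that in the local case manifestInfo stays nil, so the resulting sync status carries no revision.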

@@ -5,6 +5,8 @@ import (
	"testing"

	"github.com/stretchr/testify/assert"
+	v1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

@@ -28,7 +30,7 @@ func TestCompareAppStateEmpty(t *testing.T) {
		managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
	}
	ctrl := newFakeController(&data)
-	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
+	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
	assert.NoError(t, err)
	assert.NotNil(t, compRes)
	assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
@@ -51,7 +53,7 @@ func TestCompareAppStateMissing(t *testing.T) {
		managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
	}
	ctrl := newFakeController(&data)
-	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
+	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
	assert.NoError(t, err)
	assert.NotNil(t, compRes)
	assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -78,7 +80,7 @@ func TestCompareAppStateExtra(t *testing.T) {
		},
	}
	ctrl := newFakeController(&data)
-	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
+	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
	assert.NoError(t, err)
	assert.NotNil(t, compRes)
	assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
@@ -105,15 +107,43 @@ func TestCompareAppStateHook(t *testing.T) {
		managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
	}
	ctrl := newFakeController(&data)
-	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
+	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
	assert.NoError(t, err)
	assert.NotNil(t, compRes)
	assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
	assert.Equal(t, 0, len(compRes.resources))
	assert.Equal(t, 0, len(compRes.managedResources))
	assert.Equal(t, 1, len(compRes.hooks))
	assert.Equal(t, 0, len(compRes.conditions))
}

// checks that ignored resources are detected, but excluded from status
func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) {
	pod := test.NewPod()
	pod.SetAnnotations(map[string]string{common.AnnotationCompareOptions: "IgnoreExtraneous"})
	app := newFakeApp()
	data := fakeData{
		apps: []runtime.Object{app},
		manifestResponse: &repository.ManifestResponse{
			Manifests: []string{},
			Namespace: test.FakeDestNamespace,
			Server:    test.FakeClusterURL,
			Revision:  "abc123",
		},
		managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
	}
	ctrl := newFakeController(&data)

	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)

	assert.NoError(t, err)
	assert.NotNil(t, compRes)
	assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
	assert.Len(t, compRes.resources, 0)
	assert.Len(t, compRes.managedResources, 0)
	assert.Len(t, compRes.conditions, 0)
}

// TestCompareAppStateExtraHook tests when there is an extra _hook_ object in live but not defined in git
func TestCompareAppStateExtraHook(t *testing.T) {
	pod := test.NewPod()
@@ -133,12 +163,13 @@ func TestCompareAppStateExtraHook(t *testing.T) {
		},
	}
	ctrl := newFakeController(&data)
-	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
+	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
	assert.NoError(t, err)
	assert.NotNil(t, compRes)
	assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
	assert.Equal(t, 1, len(compRes.resources))
	assert.Equal(t, 1, len(compRes.managedResources))
	assert.Equal(t, 0, len(compRes.hooks))
	assert.Equal(t, 0, len(compRes.conditions))
}

@@ -169,7 +200,7 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
		},
	}
	ctrl := newFakeController(&data)
-	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false)
+	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
	assert.NoError(t, err)
	assert.NotNil(t, compRes)
	assert.Contains(t, compRes.conditions, argoappv1.ApplicationCondition{
@@ -178,3 +209,83 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
	})
	assert.Equal(t, 2, len(compRes.resources))
}

var defaultProj = argoappv1.AppProject{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "default",
		Namespace: test.FakeArgoCDNamespace,
	},
	Spec: argoappv1.AppProjectSpec{
		SourceRepos: []string{"*"},
		Destinations: []argoappv1.ApplicationDestination{
			{
				Server:    "*",
				Namespace: "*",
			},
		},
	},
}

func TestSetHealth(t *testing.T) {
	app := newFakeApp()
	deployment := kube.MustToUnstructured(&v1.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps/v1beta1",
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo",
			Namespace: "default",
		},
	})
	ctrl := newFakeController(&fakeData{
		apps: []runtime.Object{app, &defaultProj},
		manifestResponse: &repository.ManifestResponse{
			Manifests: []string{},
			Namespace: test.FakeDestNamespace,
			Server:    test.FakeClusterURL,
			Revision:  "abc123",
		},
		managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
			kube.GetResourceKey(deployment): deployment,
		},
	})

	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
	assert.NoError(t, err)

	assert.Equal(t, compRes.healthStatus.Status, argoappv1.HealthStatusHealthy)
}

func TestSetHealthSelfReferencedApp(t *testing.T) {
	app := newFakeApp()
	unstructuredApp := kube.MustToUnstructured(app)
	deployment := kube.MustToUnstructured(&v1.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps/v1beta1",
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo",
			Namespace: "default",
		},
	})
	ctrl := newFakeController(&fakeData{
		apps: []runtime.Object{app, &defaultProj},
		manifestResponse: &repository.ManifestResponse{
			Manifests: []string{},
			Namespace: test.FakeDestNamespace,
			Server:    test.FakeClusterURL,
			Revision:  "abc123",
		},
		managedLiveObjs: map[kube.ResourceKey]*unstructured.Unstructured{
			kube.GetResourceKey(deployment):      deployment,
			kube.GetResourceKey(unstructuredApp): unstructuredApp,
		},
	})

	compRes, err := ctrl.appStateManager.CompareAppState(app, "", app.Spec.Source, false, nil)
	assert.NoError(t, err)

	assert.Equal(t, compRes.healthStatus.Status, argoappv1.HealthStatusHealthy)
}

@@ -1,66 +0,0 @@ (entire file removed)
package controller

import (
	"testing"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/test"
	"github.com/argoproj/argo-cd/util/kube/kubetest"
)

var clusterRoleHook = `
{
  "apiVersion": "rbac.authorization.k8s.io/v1",
  "kind": "ClusterRole",
  "metadata": {
    "name": "cluster-role-hook",
    "annotations": {
      "argocd.argoproj.io/hook": "PostSync"
    }
  }
}`

func TestSyncHookProjectPermissions(t *testing.T) {
	syncCtx := newTestSyncCtx(&v1.APIResourceList{
		GroupVersion: "v1",
		APIResources: []v1.APIResource{
			{Name: "pod", Namespaced: true, Kind: "Pod", Group: "v1"},
		},
	}, &v1.APIResourceList{
		GroupVersion: "rbac.authorization.k8s.io/v1",
		APIResources: []v1.APIResource{
			{Name: "clusterroles", Namespaced: false, Kind: "ClusterRole", Group: "rbac.authorization.k8s.io"},
		},
	})

	syncCtx.kubectl = kubetest.MockKubectlCmd{}
	crHook, _ := v1alpha1.UnmarshalToUnstructured(clusterRoleHook)
	syncCtx.compareResult = &comparisonResult{
		hooks: []*unstructured.Unstructured{
			crHook,
		},
		managedResources: []managedResource{{
			Target: test.NewPod(),
		}},
	}
	syncCtx.proj.Spec.ClusterResourceWhitelist = []v1.GroupKind{}

	syncCtx.syncOp.SyncStrategy = nil
	syncCtx.sync()
	assert.Equal(t, v1alpha1.OperationFailed, syncCtx.opState.Phase)
	assert.Len(t, syncCtx.syncRes.Resources, 0)
	assert.Contains(t, syncCtx.opState.Message, "not permitted in project")

	// Now add the resource to the whitelist and try again. Resource should be created
	syncCtx.proj.Spec.ClusterResourceWhitelist = []v1.GroupKind{
		{Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
	}
	syncCtx.syncOp.SyncStrategy = nil
	syncCtx.sync()
	assert.Len(t, syncCtx.syncRes.Resources, 1)
	assert.Equal(t, v1alpha1.ResultCodeSynced, syncCtx.syncRes.Resources[0].Status)
}
@@ -2,334 +2,55 @@ package controller

import (
	"fmt"
	"reflect"
	"strings"

	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
	apiv1 "k8s.io/api/core/v1"
	apierr "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/apis/batch"

	"github.com/argoproj/argo-cd/common"
	appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/util"
	hookutil "github.com/argoproj/argo-cd/util/hook"
	"github.com/argoproj/argo-cd/util/kube"
	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)

// doHookSync initiates (or continues) a hook-based sync. This method will be invoked when there may
// already be in-flight (potentially incomplete) jobs/workflows, and should be idempotent.
func (sc *syncContext) doHookSync(syncTasks []syncTask, hooks []*unstructured.Unstructured) {
	if !sc.startedPreSyncPhase() {
		if !sc.verifyPermittedHooks(hooks) {
			return
		}
	}
	// 1. Run PreSync hooks
	if !sc.runHooks(hooks, appv1.HookTypePreSync) {
		return
	}

	// 2. Run Sync hooks (e.g. blue-green sync workflow)
	// Before performing Sync hooks, apply any normal manifests which aren't annotated with a hook.
	// We only want to do this once per operation.
	shouldContinue := true
	if !sc.startedSyncPhase() {
		if !sc.syncNonHookTasks(syncTasks) {
			sc.setOperationPhase(appv1.OperationFailed, "one or more objects failed to apply")
			return
		}
		shouldContinue = false
	}
	if !sc.runHooks(hooks, appv1.HookTypeSync) {
		shouldContinue = false
	}
	if !shouldContinue {
		return
	}

	// 3. Run PostSync hooks
	// Before running PostSync hooks, we want to make sure the rollout is complete (app is healthy). If we
	// already started the post-sync phase, then we do not need to perform the health check.
	postSyncHooks, _ := sc.getHooks(appv1.HookTypePostSync)
	if len(postSyncHooks) > 0 && !sc.startedPostSyncPhase() {
		sc.log.Infof("PostSync application health check: %s", sc.compareResult.healthStatus.Status)
		if sc.compareResult.healthStatus.Status != appv1.HealthStatusHealthy {
			sc.setOperationPhase(appv1.OperationRunning, fmt.Sprintf("waiting for %s state to run %s hooks (current health: %s)",
				appv1.HealthStatusHealthy, appv1.HookTypePostSync, sc.compareResult.healthStatus.Status))
			return
		}
	}
	if !sc.runHooks(hooks, appv1.HookTypePostSync) {
		return
	}

	// if we get here, all hooks successfully completed
	sc.setOperationPhase(appv1.OperationSucceeded, "successfully synced")
}

// verifyPermittedHooks verifies all hooks are permitted in the project
func (sc *syncContext) verifyPermittedHooks(hooks []*unstructured.Unstructured) bool {
	for _, hook := range hooks {
		gvk := hook.GroupVersionKind()
		serverRes, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
		if err != nil {
			sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("unable to identify api resource type: %v", gvk))
			return false
		}
		if !sc.proj.IsResourcePermitted(metav1.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, serverRes.Namespaced) {
			sc.setOperationPhase(appv1.OperationFailed, fmt.Sprintf("Hook resource %s:%s is not permitted in project %s", gvk.Group, gvk.Kind, sc.proj.Name))
			return false
		}

		if serverRes.Namespaced && !sc.proj.IsDestinationPermitted(appv1.ApplicationDestination{Namespace: hook.GetNamespace(), Server: sc.server}) {
			gvk := hook.GroupVersionKind()
			sc.setResourceDetails(&appv1.ResourceResult{
				Name:      hook.GetName(),
				Group:     gvk.Group,
				Version:   gvk.Version,
				Kind:      hook.GetKind(),
				Namespace: hook.GetNamespace(),
				Message:   fmt.Sprintf("namespace %v is not permitted in project '%s'", hook.GetNamespace(), sc.proj.Name),
				Status:    appv1.ResultCodeSyncFailed,
			})
			return false
		}
	}
	return true
}

// getHooks returns all Argo CD hooks, optionally filtered by ones of the specific type(s)
func (sc *syncContext) getHooks(hookTypes ...appv1.HookType) ([]*unstructured.Unstructured, error) {
	var hooks []*unstructured.Unstructured
	for _, hook := range sc.compareResult.hooks {
		if hook.GetNamespace() == "" {
			hook.SetNamespace(sc.namespace)
		}
		if !hookutil.IsArgoHook(hook) {
			// TODO: in the future, if we want to map helm hooks to Argo CD lifecycles, we should
			// include helm hooks in the returned list
			continue
		}
		if len(hookTypes) > 0 {
			match := false
			for _, desiredType := range hookTypes {
				if isHookType(hook, desiredType) {
					match = true
					break
				}
			}
			if !match {
				continue
			}
		}
		hooks = append(hooks, hook)
	}
	return hooks, nil
}

// runHooks iterates & filters the target manifests for resources of the specified hook type, then
// creates the resource. Updates the sc.opRes.hooks with the current status. Returns whether or not
// we should continue to the next hook phase.
func (sc *syncContext) runHooks(hooks []*unstructured.Unstructured, hookType appv1.HookType) bool {
	shouldContinue := true
	for _, hook := range hooks {
		if hookType == appv1.HookTypeSync && isHookType(hook, appv1.HookTypeSkip) {
			// If we get here, we are invoking all sync hooks and reached a resource that is
			// annotated with the Skip hook. This will update the resource details to indicate it
			// was skipped due to annotation
			gvk := hook.GroupVersionKind()
			sc.setResourceDetails(&appv1.ResourceResult{
				Name:      hook.GetName(),
				Group:     gvk.Group,
				Version:   gvk.Version,
				Kind:      hook.GetKind(),
				Namespace: hook.GetNamespace(),
				Message:   "Skipped",
			})
			continue
		}
		if !isHookType(hook, hookType) {
			continue
		}
		updated, err := sc.runHook(hook, hookType)
		if err != nil {
			sc.setOperationPhase(appv1.OperationError, fmt.Sprintf("%s hook error: %v", hookType, err))
			return false
		}
		if updated {
			// If the result of running a hook caused us to modify hook resource state, we should
			// not proceed to the next hook phase. This is because before proceeding to the next
			// phase, we want a full health assessment to happen. By returning early, we allow
			// the application to get requeued into the controller workqueue, and on the next
			// process iteration, a new CompareAppState() will be performed to get the most
			// up-to-date live state. This enables us to accurately wait for an application to
			// become Healthy before proceeding to run PostSync tasks.
			shouldContinue = false
		}
	}
	if !shouldContinue {
		sc.log.Infof("Stopping after %s phase due to modifications to hook resource state", hookType)
		return false
	}
	completed, successful := areHooksCompletedSuccessful(hookType, sc.syncRes.Resources)
	if !completed {
		return false
	}
	if !successful {
		sc.setOperationPhase(appv1.OperationFailed, fmt.Sprintf("%s hook failed", hookType))
		return false
	}
	return true
}

// syncNonHookTasks syncs or prunes the objects that are not handled by hooks using an apply sync.
// returns true if the sync was successful
func (sc *syncContext) syncNonHookTasks(syncTasks []syncTask) bool {
	var nonHookTasks []syncTask
	for _, task := range syncTasks {
		if task.targetObj == nil {
			nonHookTasks = append(nonHookTasks, task)
		} else {
			annotations := task.targetObj.GetAnnotations()
			if annotations != nil && annotations[common.AnnotationKeyHook] != "" {
				// we are doing a hook sync and this resource is annotated with a hook annotation
				continue
			}
			// if we get here, this resource does not have any hook annotation so we
			// should perform a `kubectl apply`
			nonHookTasks = append(nonHookTasks, task)
		}
	}
	return sc.doApplySync(nonHookTasks, false, sc.syncOp.SyncStrategy.Hook.Force, true)
}

// runHook runs the supplied hook and updates the hook status. Returns true if the result of
// invoking this method resulted in changes to any hook status
func (sc *syncContext) runHook(hook *unstructured.Unstructured, hookType appv1.HookType) (bool, error) {
	// Hook resource names are deterministic, whether they are defined by the user (metadata.name),
	// or formulated at the time of the operation (metadata.generateName). If the user specifies
	// metadata.generateName, then we will generate a formulated metadata.name before submission.
	if hook.GetName() == "" {
		postfix := strings.ToLower(fmt.Sprintf("%s-%s-%d", sc.syncRes.Revision[0:7], hookType, sc.opState.StartedAt.UTC().Unix()))
		generatedName := hook.GetGenerateName()
		hook = hook.DeepCopy()
		hook.SetName(fmt.Sprintf("%s%s", generatedName, postfix))
	}
	// Check our hook statuses to see if we already completed this hook.
	// If so, this method is a noop
	prevStatus := sc.getHookStatus(hook, hookType)
	if prevStatus != nil && prevStatus.HookPhase.Completed() {
		return false, nil
	}

	gvk := hook.GroupVersionKind()
	apiResource, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
	if err != nil {
		return false, err
	}
	resource := kube.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
	resIf := kube.ToResourceInterface(sc.dynamicIf, apiResource, resource, hook.GetNamespace())

	var liveObj *unstructured.Unstructured
	existing, err := resIf.Get(hook.GetName(), metav1.GetOptions{})
	if err != nil {
		if !apierr.IsNotFound(err) {
			return false, fmt.Errorf("Failed to get status of %s hook %s '%s': %v", hookType, gvk, hook.GetName(), err)
		}
		_, err := sc.kubectl.ApplyResource(sc.config, hook, hook.GetNamespace(), false, false)
		if err != nil {
			return false, fmt.Errorf("Failed to create %s hook %s '%s': %v", hookType, gvk, hook.GetName(), err)
		}
		created, err := resIf.Get(hook.GetName(), metav1.GetOptions{})
		if err != nil {
			return true, fmt.Errorf("Failed to get status of %s hook %s '%s': %v", hookType, gvk, hook.GetName(), err)
		}
		sc.log.Infof("%s hook %s '%s' created", hookType, gvk, created.GetName())
		sc.setOperationPhase(appv1.OperationRunning, fmt.Sprintf("running %s hooks", hookType))
		liveObj = created
	} else {
		liveObj = existing
	}
	hookStatus := newHookStatus(liveObj, hookType)
	if hookStatus.HookPhase.Completed() {
		if enforceHookDeletePolicy(hook, hookStatus.HookPhase) {
			err = sc.deleteHook(hook.GetName(), hook.GetNamespace(), hook.GroupVersionKind())
			if err != nil {
				hookStatus.HookPhase = appv1.OperationFailed
				hookStatus.Message = fmt.Sprintf("failed to delete %s hook: %v", hookStatus.HookPhase, err)
			}
		}
	}
	return sc.updateHookStatus(hookStatus), nil
}

// enforceHookDeletePolicy examines the hook delete policy of an object and deletes it based on the status
-func enforceHookDeletePolicy(hook *unstructured.Unstructured, phase appv1.OperationPhase) bool {
+func enforceHookDeletePolicy(hook *unstructured.Unstructured, operation v1alpha1.OperationPhase) bool {

	annotations := hook.GetAnnotations()
	if annotations == nil {
		return false
	}
	deletePolicies := strings.Split(annotations[common.AnnotationKeyHookDeletePolicy], ",")
	for _, dp := range deletePolicies {
-		policy := appv1.HookDeletePolicy(strings.TrimSpace(dp))
-		if policy == appv1.HookDeletePolicyHookSucceeded && phase == appv1.OperationSucceeded {
+		policy := v1alpha1.HookDeletePolicy(strings.TrimSpace(dp))
+		if policy == v1alpha1.HookDeletePolicyHookSucceeded && operation == v1alpha1.OperationSucceeded {
			return true
		}
-		if policy == appv1.HookDeletePolicyHookFailed && phase == appv1.OperationFailed {
+		if policy == v1alpha1.HookDeletePolicyHookFailed && operation == v1alpha1.OperationFailed {
			return true
		}
	}
	return false
}

// isHookType tells whether or not the supplied object is a hook of the specified type
func isHookType(hook *unstructured.Unstructured, hookType appv1.HookType) bool {
	annotations := hook.GetAnnotations()
	if annotations == nil {
		return false
	}
	resHookTypes := strings.Split(annotations[common.AnnotationKeyHook], ",")
	for _, ht := range resHookTypes {
		if string(hookType) == strings.TrimSpace(ht) {
			return true
		}
	}
	return false
}

-// newHookStatus returns a hook status from a _live_ unstructured object
-func newHookStatus(hook *unstructured.Unstructured, hookType appv1.HookType) appv1.ResourceResult {
+// getOperationPhase returns an operation phase from a _live_ unstructured object
+func getOperationPhase(hook *unstructured.Unstructured) (operation v1alpha1.OperationPhase, message string) {
	gvk := hook.GroupVersionKind()
	hookStatus := appv1.ResourceResult{
		Name:      hook.GetName(),
		Kind:      hook.GetKind(),
		Group:     gvk.Group,
		Version:   gvk.Version,
		HookType:  hookType,
		HookPhase: appv1.OperationRunning,
		Namespace: hook.GetNamespace(),
	}
	if isBatchJob(gvk) {
-		updateStatusFromBatchJob(hook, &hookStatus)
+		return getStatusFromBatchJob(hook)
	} else if isArgoWorkflow(gvk) {
-		updateStatusFromArgoWorkflow(hook, &hookStatus)
+		return getStatusFromArgoWorkflow(hook)
	} else if isPod(gvk) {
-		updateStatusFromPod(hook, &hookStatus)
+		return getStatusFromPod(hook)
	} else {
-		hookStatus.HookPhase = appv1.OperationSucceeded
-		hookStatus.Message = fmt.Sprintf("%s created", hook.GetName())
+		return v1alpha1.OperationSucceeded, fmt.Sprintf("%s created", hook.GetName())
	}
	return hookStatus
}

// isRunnable returns if the resource object is a runnable type which needs to be terminated
-func isRunnable(res *appv1.ResourceResult) bool {
-	gvk := res.GroupVersionKind()
+func isRunnable(gvk schema.GroupVersionKind) bool {
	return isBatchJob(gvk) || isArgoWorkflow(gvk) || isPod(gvk)
}

@@ -337,18 +58,16 @@ func isBatchJob(gvk schema.GroupVersionKind) bool {
	return gvk.Group == "batch" && gvk.Kind == "Job"
}

-func updateStatusFromBatchJob(hook *unstructured.Unstructured, hookStatus *appv1.ResourceResult) {
+// TODO: this is a copy-and-paste of health.getJobHealth(), refactor out?
+func getStatusFromBatchJob(hook *unstructured.Unstructured) (operation v1alpha1.OperationPhase, message string) {
	var job batch.Job
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(hook.Object, &job)
	if err != nil {
-		hookStatus.HookPhase = appv1.OperationError
-		hookStatus.Message = err.Error()
-		return
+		return v1alpha1.OperationError, err.Error()
	}
	failed := false
	var failMsg string
	complete := false
	var message string
	for _, condition := range job.Status.Conditions {
		switch condition.Type {
		case batch.JobFailed:
@@ -361,14 +80,11 @@ func updateStatusFromBatchJob(hook *unstructured.Unstructured, hookStatus *appv1
		}
	}
	if !complete {
-		hookStatus.HookPhase = appv1.OperationRunning
-		hookStatus.Message = message
+		return v1alpha1.OperationRunning, message
	} else if failed {
-		hookStatus.HookPhase = appv1.OperationFailed
-		hookStatus.Message = failMsg
+		return v1alpha1.OperationFailed, failMsg
	} else {
-		hookStatus.HookPhase = appv1.OperationSucceeded
-		hookStatus.Message = message
+		return v1alpha1.OperationSucceeded, message
	}
}

@@ -376,38 +92,36 @@ func isArgoWorkflow(gvk schema.GroupVersionKind) bool {
	return gvk.Group == "argoproj.io" && gvk.Kind == "Workflow"
}

-func updateStatusFromArgoWorkflow(hook *unstructured.Unstructured, hookStatus *appv1.ResourceResult) {
+// TODO - should we move this to health.go?
+func getStatusFromArgoWorkflow(hook *unstructured.Unstructured) (operation v1alpha1.OperationPhase, message string) {
	var wf wfv1.Workflow
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(hook.Object, &wf)
	if err != nil {
-		hookStatus.HookPhase = appv1.OperationError
-		hookStatus.Message = err.Error()
-		return
+		return v1alpha1.OperationError, err.Error()
	}
	switch wf.Status.Phase {
	case wfv1.NodePending, wfv1.NodeRunning:
-		hookStatus.HookPhase = appv1.OperationRunning
+		return v1alpha1.OperationRunning, wf.Status.Message
	case wfv1.NodeSucceeded:
-		hookStatus.HookPhase = appv1.OperationSucceeded
+		return v1alpha1.OperationSucceeded, wf.Status.Message
	case wfv1.NodeFailed:
-		hookStatus.HookPhase = appv1.OperationFailed
+		return v1alpha1.OperationFailed, wf.Status.Message
	case wfv1.NodeError:
-		hookStatus.HookPhase = appv1.OperationError
+		return v1alpha1.OperationError, wf.Status.Message
	}
-	hookStatus.Message = wf.Status.Message
+	return v1alpha1.OperationSucceeded, wf.Status.Message
}

func isPod(gvk schema.GroupVersionKind) bool {
	return gvk.Group == "" && gvk.Kind == "Pod"
}

-func updateStatusFromPod(hook *unstructured.Unstructured, hookStatus *appv1.ResourceResult) {
+// TODO - this is very similar to health.getPodHealth(), should we use that instead?
+func getStatusFromPod(hook *unstructured.Unstructured) (v1alpha1.OperationPhase, string) {
	var pod apiv1.Pod
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(hook.Object, &pod)
	if err != nil {
-		hookStatus.HookPhase = appv1.OperationError
-		hookStatus.Message = err.Error()
-		return
+		return v1alpha1.OperationError, err.Error()
	}
	getFailMessage := func(ctr *apiv1.ContainerStatus) string {
		if ctr.State.Terminated != nil {
@@ -426,135 +140,22 @@ func updateStatusFromPod(hook *unstructured.Unstructured, hookStatus *appv1.Reso

	switch pod.Status.Phase {
	case apiv1.PodPending, apiv1.PodRunning:
-		hookStatus.HookPhase = appv1.OperationRunning
+		return v1alpha1.OperationRunning, ""
	case apiv1.PodSucceeded:
-		hookStatus.HookPhase = appv1.OperationSucceeded
+		return v1alpha1.OperationSucceeded, ""
	case apiv1.PodFailed:
-		hookStatus.HookPhase = appv1.OperationFailed
		if pod.Status.Message != "" {
			// Pod has a nice error message. Use that.
-			hookStatus.Message = pod.Status.Message
-			return
+			return v1alpha1.OperationFailed, pod.Status.Message
		}
		for _, ctr := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) {
			if msg := getFailMessage(&ctr); msg != "" {
-				hookStatus.Message = msg
-				return
+				return v1alpha1.OperationFailed, msg
			}
		}
		return v1alpha1.OperationFailed, ""
	case apiv1.PodUnknown:
-		hookStatus.HookPhase = appv1.OperationError
+		return v1alpha1.OperationError, ""
	}
}

func (sc *syncContext) getHookStatus(hookObj *unstructured.Unstructured, hookType appv1.HookType) *appv1.ResourceResult {
	for _, hr := range sc.syncRes.Resources {
		if !hr.IsHook() {
			continue
		}
		ns := util.FirstNonEmpty(hookObj.GetNamespace(), sc.namespace)
		if hookEqual(hr, hookObj.GroupVersionKind().Group, hookObj.GetKind(), ns, hookObj.GetName(), hookType) {
			return hr
		}
	}
	return nil
}

func hookEqual(hr *appv1.ResourceResult, group, kind, namespace, name string, hookType appv1.HookType) bool {
	return bool(
		hr.Group == group &&
			hr.Kind == kind &&
			hr.Namespace == namespace &&
			hr.Name == name &&
			hr.HookType == hookType)
}

// updateHookStatus updates the status of a hook. Returns true if the hook was modified
func (sc *syncContext) updateHookStatus(hookStatus appv1.ResourceResult) bool {
	sc.lock.Lock()
	defer sc.lock.Unlock()
	for i, prev := range sc.syncRes.Resources {
		if !prev.IsHook() {
			continue
		}
		if hookEqual(prev, hookStatus.Group, hookStatus.Kind, hookStatus.Namespace, hookStatus.Name, hookStatus.HookType) {
			if reflect.DeepEqual(prev, hookStatus) {
				return false
			}
			if prev.HookPhase != hookStatus.HookPhase {
				sc.log.Infof("Hook %s %s/%s hookPhase: %s -> %s", hookStatus.HookType, prev.Kind, prev.Name, prev.HookPhase, hookStatus.HookPhase)
			}
			if prev.Status != hookStatus.Status {
				sc.log.Infof("Hook %s %s/%s status: %s -> %s", hookStatus.HookType, prev.Kind, prev.Name, prev.Status, hookStatus.Status)
			}
			if prev.Message != hookStatus.Message {
				sc.log.Infof("Hook %s %s/%s message: '%s' -> '%s'", hookStatus.HookType, prev.Kind, prev.Name, prev.Message, hookStatus.Message)
			}
			sc.syncRes.Resources[i] = &hookStatus
			return true
		}
	}
	sc.syncRes.Resources = append(sc.syncRes.Resources, &hookStatus)
	sc.log.Infof("Set new hook %s %s/%s. phase: %s, message: %s", hookStatus.HookType, hookStatus.Kind, hookStatus.Name, hookStatus.HookPhase, hookStatus.Message)
	return true
}

// areHooksCompletedSuccessful checks if all the hooks of the specified type are completed and successful
func areHooksCompletedSuccessful(hookType appv1.HookType, hookStatuses []*appv1.ResourceResult) (bool, bool) {
	isSuccessful := true
	for _, hookStatus := range hookStatuses {
		if !hookStatus.IsHook() {
			continue
		}
		if hookStatus.HookType != hookType {
			continue
		}
		if !hookStatus.HookPhase.Completed() {
			return false, false
		}
		if !hookStatus.HookPhase.Successful() {
			isSuccessful = false
		}
	}
	return true, isSuccessful
}

// terminate looks for any running jobs/workflow hooks and deletes the resource
func (sc *syncContext) terminate() {
	terminateSuccessful := true
	for _, hookStatus := range sc.syncRes.Resources {
		if !hookStatus.IsHook() {
			continue
		}
		if hookStatus.HookPhase.Completed() {
			continue
		}
		if isRunnable(hookStatus) {
			hookStatus.HookPhase = appv1.OperationFailed
			err := sc.deleteHook(hookStatus.Name, hookStatus.Namespace, hookStatus.GroupVersionKind())
			if err != nil {
				hookStatus.Message = fmt.Sprintf("Failed to delete %s hook %s/%s: %v", hookStatus.HookType, hookStatus.Kind, hookStatus.Name, err)
				terminateSuccessful = false
			} else {
				hookStatus.Message = fmt.Sprintf("Deleted %s hook %s/%s", hookStatus.HookType, hookStatus.Kind, hookStatus.Name)
			}
			sc.updateHookStatus(*hookStatus)
		}
	}
	if terminateSuccessful {
		sc.setOperationPhase(appv1.OperationFailed, "Operation terminated")
	} else {
		sc.setOperationPhase(appv1.OperationError, "Operation termination had errors")
	}
}

func (sc *syncContext) deleteHook(name, namespace string, gvk schema.GroupVersionKind) error {
	apiResource, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)
	if err != nil {
		return err
	}
	resource := kube.ToGroupVersionResource(gvk.GroupVersion().String(), apiResource)
	resIf := kube.ToResourceInterface(sc.dynamicIf, apiResource, resource, namespace)
	propagationPolicy := metav1.DeletePropagationForeground
	return resIf.Delete(name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
	return v1alpha1.OperationRunning, ""
}

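To make the delete-policy behavior concrete, a small hypothetical illustration of enforceHookDeletePolicy. The annotation keys are written out literally here; common.AnnotationKeyHookDeletePolicy is assumed to resolve to "argocd.argoproj.io/hook-delete-policy", matching the "argocd.argoproj.io/hook" key used elsewhere in this diff:

	// Hypothetical: a PostSync hook that should be cleaned up once it succeeds.
	hook := &unstructured.Unstructured{Object: map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]interface{}{
				"argocd.argoproj.io/hook":               "PostSync",
				"argocd.argoproj.io/hook-delete-policy": "HookSucceeded",
			},
		},
	}}
	fmt.Println(enforceHookDeletePolicy(hook, v1alpha1.OperationSucceeded)) // true: delete the hook
	fmt.Println(enforceHookDeletePolicy(hook, v1alpha1.OperationFailed))    // false: keep it for debugging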
controller/sync_phase.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package controller

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/util/hook"
)

func syncPhases(obj *unstructured.Unstructured) []v1alpha1.SyncPhase {
	if hook.Skip(obj) {
		return nil
	} else if hook.IsHook(obj) {
		var phases []v1alpha1.SyncPhase
		for _, hookType := range hook.Types(obj) {
			switch hookType {
			case v1alpha1.HookTypePreSync, v1alpha1.HookTypeSync, v1alpha1.HookTypePostSync:
				phases = append(phases, v1alpha1.SyncPhase(hookType))
			}
		}
		return phases
	} else {
		return []v1alpha1.SyncPhase{v1alpha1.SyncPhaseSync}
	}
}
controller/sync_phase_test.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package controller

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/test"
)

func TestSyncPhaseNone(t *testing.T) {
	assert.Equal(t, []SyncPhase{SyncPhaseSync}, syncPhases(&unstructured.Unstructured{}))
}

func TestSyncPhasePreSync(t *testing.T) {
	assert.Equal(t, []SyncPhase{SyncPhasePreSync}, syncPhases(pod("PreSync")))
}

func TestSyncPhaseSync(t *testing.T) {
	assert.Equal(t, []SyncPhase{SyncPhaseSync}, syncPhases(pod("Sync")))
}

func TestSyncPhaseSkip(t *testing.T) {
	assert.Nil(t, syncPhases(pod("Skip")))
}

// garbage hooks are still hooks, but have no phases, because some user spelled something wrong
func TestSyncPhaseGarbage(t *testing.T) {
	assert.Nil(t, syncPhases(pod("Garbage")))
}

func TestSyncPhasePost(t *testing.T) {
	assert.Equal(t, []SyncPhase{SyncPhasePostSync}, syncPhases(pod("PostSync")))
}

func TestSyncPhaseTwoPhases(t *testing.T) {
	assert.Equal(t, []SyncPhase{SyncPhasePreSync, SyncPhasePostSync}, syncPhases(pod("PreSync,PostSync")))
}

func pod(hookType string) *unstructured.Unstructured {
	pod := test.NewPod()
	pod.SetAnnotations(map[string]string{"argocd.argoproj.io/hook": hookType})
	return pod
}
controller/sync_task.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package controller

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/argoproj/argo-cd/common"
	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/util/hook"
)

// syncTask holds the live and target object. At least one should be non-nil. A targetObj of nil
// indicates the live object needs to be pruned. A liveObj of nil indicates the object has yet to
// be deployed
type syncTask struct {
	phase          v1alpha1.SyncPhase
	liveObj        *unstructured.Unstructured
	targetObj      *unstructured.Unstructured
	skipDryRun     bool
	syncStatus     v1alpha1.ResultCode
	operationState v1alpha1.OperationPhase
	message        string
}

func ternary(val bool, a, b string) string {
	if val {
		return a
	} else {
		return b
	}
}

func (t *syncTask) String() string {
	return fmt.Sprintf("%s/%d %s %s/%s:%s/%s %s->%s (%s,%s,%s)",
		t.phase, t.wave(),
		ternary(t.isHook(), "hook", "resource"), t.group(), t.kind(), t.namespace(), t.name(),
		ternary(t.liveObj != nil, "obj", "nil"), ternary(t.targetObj != nil, "obj", "nil"),
		t.syncStatus, t.operationState, t.message,
	)
}

func (t *syncTask) isPrune() bool {
	return t.targetObj == nil
}

// return the target object (if this exists) otherwise the live object
// some caution - often you explicitly want the live object not the target object
func (t *syncTask) obj() *unstructured.Unstructured {
	return obj(t.targetObj, t.liveObj)
}

func (t *syncTask) wave() int {

	text := t.obj().GetAnnotations()[common.AnnotationSyncWave]
	if text == "" {
		return 0
	}

	val, err := strconv.Atoi(text)
	if err != nil {
		return 0
	}

	return val
}

func (t *syncTask) isHook() bool {
	return hook.IsHook(t.obj())
}

func (t *syncTask) group() string {
	return t.groupVersionKind().Group
}

func (t *syncTask) kind() string {
	return t.groupVersionKind().Kind
}

func (t *syncTask) version() string {
	return t.groupVersionKind().Version
}

func (t *syncTask) groupVersionKind() schema.GroupVersionKind {
	return t.obj().GroupVersionKind()
}

func (t *syncTask) name() string {
	return t.obj().GetName()
}

func (t *syncTask) namespace() string {
	return t.obj().GetNamespace()
}

func (t *syncTask) running() bool {
	return t.operationState == v1alpha1.OperationRunning
}

func (t *syncTask) completed() bool {
	return t.operationState.Completed()
}

func (t *syncTask) successful() bool {
	return t.operationState.Successful()
}

func (t *syncTask) hookType() v1alpha1.HookType {
	if t.isHook() {
		return v1alpha1.HookType(t.phase)
	} else {
		return ""
	}
}
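A short hypothetical illustration of how wave() reads the sync-wave annotation; common.AnnotationSyncWave is assumed to resolve to "argocd.argoproj.io/sync-wave", the literal used in the test fixtures below. Missing or malformed values fall back to wave 0:

	// Hypothetical: a task whose target object requests wave 5.
	task := &syncTask{targetObj: &unstructured.Unstructured{Object: map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]interface{}{
				"argocd.argoproj.io/sync-wave": "5",
			},
		},
	}}}
	fmt.Println(task.wave()) // 5
	fmt.Println((&syncTask{targetObj: &unstructured.Unstructured{}}).wave()) // 0 (no annotation)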
controller/sync_task_test.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package controller

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/test"
)

func Test_syncTask_hookType(t *testing.T) {
	type fields struct {
		phase   SyncPhase
		liveObj *unstructured.Unstructured
	}
	tests := []struct {
		name   string
		fields fields
		want   HookType
	}{
		{"Empty", fields{SyncPhaseSync, test.NewPod()}, ""},
		{"PreSyncHook", fields{SyncPhasePreSync, test.NewHook(HookTypePreSync)}, HookTypePreSync},
		{"SyncHook", fields{SyncPhaseSync, test.NewHook(HookTypeSync)}, HookTypeSync},
		{"PostSyncHook", fields{SyncPhasePostSync, test.NewHook(HookTypePostSync)}, HookTypePostSync},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			task := &syncTask{
				phase:   tt.fields.phase,
				liveObj: tt.fields.liveObj,
			}
			hookType := task.hookType()
			assert.EqualValues(t, tt.want, hookType)
		})
	}
}
137
controller/sync_tasks.go
Normal file
@@ -0,0 +1,137 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
// kindOrder represents the correct order of Kubernetes resources within a manifest
|
||||
var syncPhaseOrder = map[v1alpha1.SyncPhase]int{
|
||||
v1alpha1.SyncPhasePreSync: -1,
|
||||
v1alpha1.SyncPhaseSync: 0,
|
||||
v1alpha1.SyncPhasePostSync: 1,
|
||||
}
|
||||
|
||||
// kindOrder represents the correct order of Kubernetes resources within a manifest
|
||||
// https://github.com/helm/helm/blob/master/pkg/tiller/kind_sorter.go
|
||||
var kindOrder = map[string]int{}
|
||||
|
||||
func init() {
|
||||
kinds := []string{
|
||||
"Namespace",
|
||||
"ResourceQuota",
|
||||
"LimitRange",
|
||||
"PodSecurityPolicy",
|
||||
"PodDisruptionBudget",
|
||||
"Secret",
|
||||
"ConfigMap",
|
||||
"StorageClass",
|
||||
"PersistentVolume",
|
||||
"PersistentVolumeClaim",
|
||||
"ServiceAccount",
|
||||
"CustomResourceDefinition",
|
||||
"ClusterRole",
|
||||
"ClusterRoleBinding",
|
||||
"Role",
|
||||
"RoleBinding",
|
||||
"Service",
|
||||
"DaemonSet",
|
||||
"Pod",
|
||||
"ReplicationController",
|
||||
"ReplicaSet",
|
||||
"Deployment",
|
||||
"StatefulSet",
|
||||
"Job",
|
||||
"CronJob",
|
||||
"Ingress",
|
||||
"APIService",
|
||||
}
|
||||
for i, kind := range kinds {
|
||||
// make sure none of the above entries are zero, we need that for custom resources
|
||||
kindOrder[kind] = i - len(kinds)
|
||||
}
|
||||
}
|
||||
|
||||
type syncTasks []*syncTask
|
||||
|
||||
func (s syncTasks) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s syncTasks) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
// order is
|
||||
// 1. phase
|
||||
// 2. wave
|
||||
// 3. kind
|
||||
// 4. name
|
||||
func (s syncTasks) Less(i, j int) bool {
|
||||
|
||||
tA := s[i]
|
||||
tB := s[j]
|
||||
|
||||
d := syncPhaseOrder[tA.phase] - syncPhaseOrder[tB.phase]
|
||||
if d != 0 {
|
||||
return d < 0
|
||||
}
|
||||
|
||||
d = tA.wave() - tB.wave()
|
||||
if d != 0 {
|
||||
return d < 0
|
||||
}
|
||||
|
||||
a := tA.obj()
|
||||
b := tB.obj()
|
||||
|
||||
// we take advantage of the fact that if the kind is not in the kindOrder map,
|
||||
// then it will return the default int value of zero, which is the highest value
|
||||
d = kindOrder[a.GetKind()] - kindOrder[b.GetKind()]
|
||||
if d != 0 {
|
||||
return d < 0
|
||||
}
|
||||
|
||||
return a.GetName() < b.GetName()
|
||||
}
|
||||
|
||||
func (s syncTasks) Filter(predicate func(task *syncTask) bool) (tasks syncTasks) {
|
||||
for _, task := range s {
|
||||
if predicate(task) {
|
||||
tasks = append(tasks, task)
|
||||
}
|
||||
}
|
||||
return tasks
|
||||
}
|
||||
|
||||
func (s syncTasks) Find(predicate func(task *syncTask) bool) *syncTask {
|
||||
for _, task := range s {
|
||||
if predicate(task) {
|
||||
return task
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s syncTasks) String() string {
|
||||
var values []string
|
||||
for _, task := range s {
|
||||
values = append(values, task.String())
|
||||
}
|
||||
return "[" + strings.Join(values, ", ") + "]"
|
||||
}
|
||||
|
||||
func (s syncTasks) phase() v1alpha1.SyncPhase {
|
||||
if len(s) > 0 {
|
||||
return s[0].phase
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s syncTasks) wave() int {
|
||||
if len(s) > 0 {
|
||||
return s[0].wave()
|
||||
}
|
||||
return 0
|
||||
}
|
||||
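The per-task `wave()` helper is defined elsewhere in the controller. As an illustration only (an assumption, not this file's actual code), it presumably parses the `argocd.argoproj.io/sync-wave` annotation that the tests below exercise; a minimal sketch:

```go
package controller

import (
	"strconv"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// waveOf is a hypothetical sketch of deriving a task's wave from the
// argocd.argoproj.io/sync-wave annotation; resources without the
// annotation (or with a malformed value) fall into wave zero.
func waveOf(obj *unstructured.Unstructured) int {
	if text, ok := obj.GetAnnotations()["argocd.argoproj.io/sync-wave"]; ok {
		if wave, err := strconv.Atoi(text); err == nil {
			return wave
		}
	}
	return 0
}
```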
231
controller/sync_tasks_test.go
Normal file
@@ -0,0 +1,231 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
func Test_syncTasks_kindOrder(t *testing.T) {
|
||||
assert.Equal(t, -27, kindOrder["Namespace"])
|
||||
assert.Equal(t, -1, kindOrder["APIService"])
|
||||
assert.Equal(t, 0, kindOrder["MyCRD"])
|
||||
}
|
||||
|
||||
func TestSortSyncTask(t *testing.T) {
|
||||
sort.Sort(unsortedTasks)
|
||||
assert.Equal(t, sortedTasks, unsortedTasks)
|
||||
}
|
||||
|
||||
var unsortedTasks = syncTasks{
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Pod",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Service",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "PersistentVolume",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"annotations": map[string]interface{}{
|
||||
"argocd.argoproj.io/sync-wave": "1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "b",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "a",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"annotations": map[string]interface{}{
|
||||
"argocd.argoproj.io/sync-wave": "-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
phase: SyncPhasePreSync,
|
||||
targetObj: &unstructured.Unstructured{},
|
||||
},
|
||||
{
|
||||
phase: SyncPhasePostSync, targetObj: &unstructured.Unstructured{},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "ConfigMap",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var sortedTasks = syncTasks{
|
||||
{
|
||||
phase: SyncPhasePreSync,
|
||||
targetObj: &unstructured.Unstructured{},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"annotations": map[string]interface{}{
|
||||
"argocd.argoproj.io/sync-wave": "-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "ConfigMap",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "PersistentVolume",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Service",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Pod",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "a",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "b",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"annotations": map[string]interface{}{
|
||||
"argocd.argoproj.io/sync-wave": "1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
phase: SyncPhasePostSync,
|
||||
targetObj: &unstructured.Unstructured{},
|
||||
},
|
||||
}
|
||||
|
||||
func Test_syncTasks_Filter(t *testing.T) {
|
||||
tasks := syncTasks{{phase: SyncPhaseSync}, {phase: SyncPhasePostSync}}
|
||||
|
||||
assert.Equal(t, syncTasks{{phase: SyncPhaseSync}}, tasks.Filter(func(t *syncTask) bool {
|
||||
return t.phase == SyncPhaseSync
|
||||
}))
|
||||
}
|
||||
|
||||
func TestSyncNamespaceAgainstCRD(t *testing.T) {
|
||||
crd := &syncTask{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": "Workflow",
|
||||
},
|
||||
}}
|
||||
namespace := &syncTask{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": "Namespace",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
unsorted := syncTasks{crd, namespace}
|
||||
sort.Sort(unsorted)
|
||||
|
||||
assert.Equal(t, syncTasks{namespace, crd}, unsorted)
|
||||
}
|
||||
@@ -2,12 +2,11 @@ package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -19,6 +18,7 @@ import (
|
||||
|
||||
"github.com/argoproj/argo-cd/common"
|
||||
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/reposerver/repository"
|
||||
"github.com/argoproj/argo-cd/test"
|
||||
"github.com/argoproj/argo-cd/util/kube"
|
||||
@@ -45,7 +45,9 @@ func newTestSyncCtx(resources ...*v1.APIResourceList) *syncContext {
|
||||
config: &rest.Config{},
|
||||
namespace: test.FakeArgoCDNamespace,
|
||||
server: test.FakeClusterURL,
|
||||
syncRes: &v1alpha1.SyncOperationResult{},
|
||||
syncRes: &v1alpha1.SyncOperationResult{
|
||||
Revision: "FooBarBaz",
|
||||
},
|
||||
syncOp: &v1alpha1.SyncOperation{
|
||||
Prune: true,
|
||||
SyncStrategy: &v1alpha1.SyncStrategy{
|
||||
@@ -104,18 +106,19 @@ func TestSyncCreateInSortedOrder(t *testing.T) {
|
||||
}},
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
|
||||
assert.Len(t, syncCtx.syncRes.Resources, 2)
|
||||
for i := range syncCtx.syncRes.Resources {
|
||||
if syncCtx.syncRes.Resources[i].Kind == "Pod" {
|
||||
assert.Equal(t, v1alpha1.ResultCodeSynced, syncCtx.syncRes.Resources[i].Status)
|
||||
} else if syncCtx.syncRes.Resources[i].Kind == "Service" {
|
||||
assert.Equal(t, v1alpha1.ResultCodeSynced, syncCtx.syncRes.Resources[i].Status)
|
||||
result := syncCtx.syncRes.Resources[i]
|
||||
if result.Kind == "Pod" {
|
||||
assert.Equal(t, v1alpha1.ResultCodeSynced, result.Status)
|
||||
assert.Equal(t, "", result.Message)
|
||||
} else if result.Kind == "Service" {
|
||||
assert.Equal(t, "", result.Message)
|
||||
} else {
|
||||
t.Error("Resource isn't a pod or a service")
|
||||
}
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
|
||||
}
|
||||
|
||||
func TestSyncCreateNotWhitelistedClusterResources(t *testing.T) {
|
||||
@@ -147,8 +150,9 @@ func TestSyncCreateNotWhitelistedClusterResources(t *testing.T) {
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Len(t, syncCtx.syncRes.Resources, 1)
|
||||
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, syncCtx.syncRes.Resources[0].Status)
|
||||
assert.Contains(t, syncCtx.syncRes.Resources[0].Message, "not permitted in project")
|
||||
result := syncCtx.syncRes.Resources[0]
|
||||
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, result.Status)
|
||||
assert.Contains(t, result.Message, "not permitted in project")
|
||||
}
|
||||
|
||||
func TestSyncBlacklistedNamespacedResources(t *testing.T) {
|
||||
@@ -166,73 +170,83 @@ func TestSyncBlacklistedNamespacedResources(t *testing.T) {
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Len(t, syncCtx.syncRes.Resources, 1)
|
||||
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, syncCtx.syncRes.Resources[0].Status)
|
||||
assert.Contains(t, syncCtx.syncRes.Resources[0].Message, "not permitted in project")
|
||||
result := syncCtx.syncRes.Resources[0]
|
||||
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, result.Status)
|
||||
assert.Contains(t, result.Message, "not permitted in project")
|
||||
}
|
||||
|
||||
func TestSyncSuccessfully(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
pod := test.NewPod()
|
||||
pod.SetNamespace(test.FakeArgoCDNamespace)
|
||||
syncCtx.compareResult = &comparisonResult{
|
||||
managedResources: []managedResource{{
|
||||
Live: nil,
|
||||
Target: test.NewService(),
|
||||
}, {
|
||||
Live: test.NewPod(),
|
||||
Live: pod,
|
||||
Target: nil,
|
||||
}},
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
|
||||
assert.Len(t, syncCtx.syncRes.Resources, 2)
|
||||
for i := range syncCtx.syncRes.Resources {
|
||||
if syncCtx.syncRes.Resources[i].Kind == "Pod" {
|
||||
assert.Equal(t, v1alpha1.ResultCodePruned, syncCtx.syncRes.Resources[i].Status)
|
||||
} else if syncCtx.syncRes.Resources[i].Kind == "Service" {
|
||||
assert.Equal(t, v1alpha1.ResultCodeSynced, syncCtx.syncRes.Resources[i].Status)
|
||||
result := syncCtx.syncRes.Resources[i]
|
||||
if result.Kind == "Pod" {
|
||||
assert.Equal(t, v1alpha1.ResultCodePruned, result.Status)
|
||||
assert.Equal(t, "pruned", result.Message)
|
||||
} else if result.Kind == "Service" {
|
||||
assert.Equal(t, v1alpha1.ResultCodeSynced, result.Status)
|
||||
assert.Equal(t, "", result.Message)
|
||||
} else {
|
||||
t.Error("Resource isn't a pod or a service")
|
||||
}
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
|
||||
}
|
||||
|
||||
func TestSyncDeleteSuccessfully(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
svc := test.NewService()
|
||||
svc.SetNamespace(test.FakeArgoCDNamespace)
|
||||
pod := test.NewPod()
|
||||
pod.SetNamespace(test.FakeArgoCDNamespace)
|
||||
syncCtx.compareResult = &comparisonResult{
|
||||
managedResources: []managedResource{{
|
||||
Live: test.NewService(),
|
||||
Live: svc,
|
||||
Target: nil,
|
||||
}, {
|
||||
Live: test.NewPod(),
|
||||
Live: pod,
|
||||
Target: nil,
|
||||
}},
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
|
||||
for i := range syncCtx.syncRes.Resources {
|
||||
if syncCtx.syncRes.Resources[i].Kind == "Pod" {
|
||||
assert.Equal(t, v1alpha1.ResultCodePruned, syncCtx.syncRes.Resources[i].Status)
|
||||
} else if syncCtx.syncRes.Resources[i].Kind == "Service" {
|
||||
assert.Equal(t, v1alpha1.ResultCodePruned, syncCtx.syncRes.Resources[i].Status)
|
||||
result := syncCtx.syncRes.Resources[i]
|
||||
if result.Kind == "Pod" {
|
||||
assert.Equal(t, v1alpha1.ResultCodePruned, result.Status)
|
||||
assert.Equal(t, "pruned", result.Message)
|
||||
} else if result.Kind == "Service" {
|
||||
assert.Equal(t, v1alpha1.ResultCodePruned, result.Status)
|
||||
assert.Equal(t, "pruned", result.Message)
|
||||
} else {
|
||||
t.Error("Resource isn't a pod or a service")
|
||||
}
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
|
||||
}
|
||||
|
||||
func TestSyncCreateFailure(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
testSvc := test.NewService()
|
||||
syncCtx.kubectl = kubetest.MockKubectlCmd{
|
||||
Commands: map[string]kubetest.KubectlOutput{
|
||||
"test-service": {
|
||||
testSvc.GetName(): {
|
||||
Output: "",
|
||||
Err: fmt.Errorf("error: error validating \"test.yaml\": error validating data: apiVersion not set; if you choose to ignore these errors, turn validation off with --validate=false"),
|
||||
Err: fmt.Errorf("foo"),
|
||||
},
|
||||
},
|
||||
}
|
||||
testSvc := test.NewService()
|
||||
testSvc.SetAPIVersion("")
|
||||
syncCtx.compareResult = &comparisonResult{
|
||||
managedResources: []managedResource{{
|
||||
Live: nil,
|
||||
@@ -241,7 +255,9 @@ func TestSyncCreateFailure(t *testing.T) {
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Len(t, syncCtx.syncRes.Resources, 1)
|
||||
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, syncCtx.syncRes.Resources[0].Status)
|
||||
result := syncCtx.syncRes.Resources[0]
|
||||
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, result.Status)
|
||||
assert.Equal(t, "foo", result.Message)
|
||||
}
|
||||
|
||||
func TestSyncPruneFailure(t *testing.T) {
|
||||
@@ -250,12 +266,13 @@ func TestSyncPruneFailure(t *testing.T) {
|
||||
Commands: map[string]kubetest.KubectlOutput{
|
||||
"test-service": {
|
||||
Output: "",
|
||||
Err: fmt.Errorf(" error: timed out waiting for \"test-service\" to be synced"),
|
||||
Err: fmt.Errorf("foo"),
|
||||
},
|
||||
},
|
||||
}
|
||||
testSvc := test.NewService()
|
||||
testSvc.SetName("test-service")
|
||||
testSvc.SetNamespace(test.FakeArgoCDNamespace)
|
||||
syncCtx.compareResult = &comparisonResult{
|
||||
managedResources: []managedResource{{
|
||||
Live: testSvc,
|
||||
@@ -263,155 +280,11 @@ func TestSyncPruneFailure(t *testing.T) {
|
||||
}},
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Equal(t, v1alpha1.OperationFailed, syncCtx.opState.Phase)
|
||||
assert.Len(t, syncCtx.syncRes.Resources, 1)
|
||||
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, syncCtx.syncRes.Resources[0].Status)
|
||||
}
|
||||
|
||||
func unsortedManifest() []syncTask {
|
||||
return []syncTask{
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Pod",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Service",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "PersistentVolume",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "ConfigMap",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func sortedManifest() []syncTask {
|
||||
return []syncTask{
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "ConfigMap",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "PersistentVolume",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Service",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Pod",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortKubernetesResourcesSuccessfully(t *testing.T) {
|
||||
unsorted := unsortedManifest()
|
||||
ks := newKindSorter(unsorted, resourceOrder)
|
||||
sort.Sort(ks)
|
||||
|
||||
expectedOrder := sortedManifest()
|
||||
assert.Equal(t, len(unsorted), len(expectedOrder))
|
||||
for i, sorted := range unsorted {
|
||||
assert.Equal(t, expectedOrder[i], sorted)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSortManifestHandleNil(t *testing.T) {
|
||||
task := syncTask{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Service",
|
||||
},
|
||||
},
|
||||
}
|
||||
manifest := []syncTask{
|
||||
{},
|
||||
task,
|
||||
}
|
||||
ks := newKindSorter(manifest, resourceOrder)
|
||||
sort.Sort(ks)
|
||||
assert.Equal(t, task, manifest[0])
|
||||
assert.Nil(t, manifest[1].targetObj)
|
||||
}
|
||||
|
||||
func TestSyncNamespaceAgainstCRD(t *testing.T) {
|
||||
crd := syncTask{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": "argoproj.io/alpha1",
|
||||
"kind": "Workflow",
|
||||
},
|
||||
}}
|
||||
namespace := syncTask{
|
||||
targetObj: &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"GroupVersion": apiv1.SchemeGroupVersion.String(),
|
||||
"kind": "Namespace",
|
||||
},
|
||||
},
|
||||
}
|
||||
unsorted := []syncTask{crd, namespace}
|
||||
ks := newKindSorter(unsorted, resourceOrder)
|
||||
sort.Sort(ks)
|
||||
|
||||
expectedOrder := []syncTask{namespace, crd}
|
||||
assert.Equal(t, len(unsorted), len(expectedOrder))
|
||||
for i, sorted := range unsorted {
|
||||
assert.Equal(t, expectedOrder[i], sorted)
|
||||
}
|
||||
result := syncCtx.syncRes.Resources[0]
|
||||
assert.Equal(t, v1alpha1.ResultCodeSyncFailed, result.Status)
|
||||
assert.Equal(t, "foo", result.Message)
|
||||
}
|
||||
|
||||
func TestDontSyncOrPruneHooks(t *testing.T) {
|
||||
@@ -421,23 +294,114 @@ func TestDontSyncOrPruneHooks(t *testing.T) {
|
||||
targetPod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
|
||||
liveSvc := test.NewService()
|
||||
liveSvc.SetName("dont-prune-me")
|
||||
liveSvc.SetNamespace(test.FakeArgoCDNamespace)
|
||||
liveSvc.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync"})
|
||||
|
||||
syncCtx.compareResult = &comparisonResult{
|
||||
managedResources: []managedResource{{
|
||||
Live: nil,
|
||||
Target: targetPod,
|
||||
Hook: true,
|
||||
}, {
|
||||
Live: liveSvc,
|
||||
Target: nil,
|
||||
Hook: true,
|
||||
}},
|
||||
hooks: []*unstructured.Unstructured{targetPod, liveSvc},
|
||||
}
|
||||
syncCtx.sync()
|
||||
assert.Len(t, syncCtx.syncRes.Resources, 0)
|
||||
syncCtx.sync()
|
||||
assert.Equal(t, syncCtx.opState.Phase, v1alpha1.OperationSucceeded)
|
||||
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
|
||||
}
|
||||
|
||||
// make sure that we do not prune resources with Prune=false
|
||||
func TestDontPrunePruneFalse(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
pod := test.NewPod()
|
||||
pod.SetAnnotations(map[string]string{common.AnnotationSyncOptions: "Prune=false"})
|
||||
pod.SetNamespace(test.FakeArgoCDNamespace)
|
||||
syncCtx.compareResult = &comparisonResult{managedResources: []managedResource{{Live: pod}}}
|
||||
|
||||
syncCtx.sync()
|
||||
|
||||
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
|
||||
assert.Len(t, syncCtx.syncRes.Resources, 1)
|
||||
assert.Equal(t, v1alpha1.ResultCodePruneSkipped, syncCtx.syncRes.Resources[0].Status)
|
||||
assert.Equal(t, "ignored (no prune)", syncCtx.syncRes.Resources[0].Message)
|
||||
|
||||
syncCtx.sync()
|
||||
|
||||
assert.Equal(t, v1alpha1.OperationSucceeded, syncCtx.opState.Phase)
|
||||
}
|
||||
|
||||
func TestSelectiveSyncOnly(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
pod1 := test.NewPod()
|
||||
pod1.SetName("pod-1")
|
||||
pod2 := test.NewPod()
|
||||
pod2.SetName("pod-2")
|
||||
syncCtx.compareResult = &comparisonResult{
|
||||
managedResources: []managedResource{{Target: pod1}},
|
||||
}
|
||||
syncCtx.syncResources = []v1alpha1.SyncOperationResource{{Kind: "Pod", Name: "pod-1"}}
|
||||
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
|
||||
assert.True(t, successful)
|
||||
assert.Len(t, tasks, 1)
|
||||
assert.Equal(t, "pod-1", tasks[0].name())
|
||||
}
|
||||
|
||||
func TestUnnamedHooksGetUniqueNames(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
syncCtx.syncOp.SyncStrategy.Apply = nil
|
||||
pod := test.NewPod()
|
||||
pod.SetName("")
|
||||
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "PreSync,PostSync"})
|
||||
syncCtx.compareResult = &comparisonResult{hooks: []*unstructured.Unstructured{pod}}
|
||||
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
|
||||
assert.True(t, successful)
|
||||
assert.Len(t, tasks, 2)
|
||||
assert.Contains(t, tasks[0].name(), "foobarb-presync-")
|
||||
assert.Contains(t, tasks[1].name(), "foobarb-postsync-")
|
||||
assert.Equal(t, "", pod.GetName())
|
||||
}
|
||||
|
||||
func TestManagedResourceAreNotNamed(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
pod := test.NewPod()
|
||||
pod.SetName("")
|
||||
syncCtx.compareResult = &comparisonResult{managedResources: []managedResource{{Target: pod}}}
|
||||
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
|
||||
assert.True(t, successful)
|
||||
assert.Len(t, tasks, 1)
|
||||
assert.Equal(t, "", tasks[0].name())
|
||||
assert.Equal(t, "", pod.GetName())
|
||||
}
|
||||
|
||||
func TestDeDupingTasks(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
syncCtx.syncOp.SyncStrategy.Apply = nil
|
||||
pod := test.NewPod()
|
||||
pod.SetAnnotations(map[string]string{common.AnnotationKeyHook: "Sync"})
|
||||
syncCtx.compareResult = &comparisonResult{
|
||||
managedResources: []managedResource{{Target: pod}},
|
||||
hooks: []*unstructured.Unstructured{pod},
|
||||
}
|
||||
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
|
||||
assert.True(t, successful)
|
||||
assert.Len(t, tasks, 1)
|
||||
}
|
||||
|
||||
func TestObjectsGetANamespace(t *testing.T) {
|
||||
syncCtx := newTestSyncCtx()
|
||||
pod := test.NewPod()
|
||||
syncCtx.compareResult = &comparisonResult{managedResources: []managedResource{{Target: pod}}}
|
||||
|
||||
tasks, successful := syncCtx.getSyncTasks()
|
||||
|
||||
assert.True(t, successful)
|
||||
assert.Len(t, tasks, 1)
|
||||
assert.Equal(t, test.FakeArgoCDNamespace, tasks[0].namespace())
|
||||
assert.Equal(t, "", pod.GetNamespace())
|
||||
}
|
||||
|
||||
func TestPersistRevisionHistory(t *testing.T) {
|
||||
@@ -526,3 +490,74 @@ func TestPersistRevisionHistoryRollback(t *testing.T) {
|
||||
assert.Equal(t, source, updatedApp.Status.History[0].Source)
|
||||
assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
|
||||
}
|
||||
|
||||
func Test_syncContext_isSelectiveSync(t *testing.T) {
|
||||
type fields struct {
|
||||
compareResult *comparisonResult
|
||||
syncResources []SyncOperationResource
|
||||
}
|
||||
oneSyncResource := []SyncOperationResource{{}}
|
||||
oneResource := func(group, kind, name string, hook bool) *comparisonResult {
|
||||
return &comparisonResult{resources: []v1alpha1.ResourceStatus{{Group: group, Kind: kind, Name: name, Hook: hook}}}
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
want bool
|
||||
}{
|
||||
{"Empty", fields{}, false},
|
||||
{"OneCompareResult", fields{oneResource("", "", "", false), []SyncOperationResource{}}, true},
|
||||
{"OneSyncResource", fields{&comparisonResult{}, oneSyncResource}, true},
|
||||
{"Equal", fields{oneResource("", "", "", false), oneSyncResource}, false},
|
||||
{"EqualOutOfOrder", fields{&comparisonResult{resources: []v1alpha1.ResourceStatus{{Group: "a"}, {Group: "b"}}}, []SyncOperationResource{{Group: "b"}, {Group: "a"}}}, false},
|
||||
{"KindDifferent", fields{oneResource("foo", "", "", false), oneSyncResource}, true},
|
||||
{"GroupDifferent", fields{oneResource("", "foo", "", false), oneSyncResource}, true},
|
||||
{"NameDifferent", fields{oneResource("", "", "foo", false), oneSyncResource}, true},
|
||||
{"HookIgnored", fields{oneResource("", "", "", true), []SyncOperationResource{}}, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sc := &syncContext{
|
||||
compareResult: tt.fields.compareResult,
|
||||
syncResources: tt.fields.syncResources,
|
||||
}
|
||||
if got := sc.isSelectiveSync(); got != tt.want {
|
||||
t.Errorf("syncContext.isSelectiveSync() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_syncContext_liveObj(t *testing.T) {
|
||||
type fields struct {
|
||||
compareResult *comparisonResult
|
||||
}
|
||||
type args struct {
|
||||
obj *unstructured.Unstructured
|
||||
}
|
||||
obj := test.NewPod()
|
||||
obj.SetNamespace("my-ns")
|
||||
|
||||
found := test.NewPod()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want *unstructured.Unstructured
|
||||
}{
|
||||
{"None", fields{compareResult: &comparisonResult{managedResources: []managedResource{}}}, args{obj: &unstructured.Unstructured{}}, nil},
|
||||
{"Found", fields{compareResult: &comparisonResult{managedResources: []managedResource{{Group: obj.GroupVersionKind().Group, Kind: obj.GetKind(), Namespace: obj.GetNamespace(), Name: obj.GetName(), Live: found}}}}, args{obj: obj}, found},
|
||||
{"EmptyNamespace", fields{compareResult: &comparisonResult{managedResources: []managedResource{{Group: obj.GroupVersionKind().Group, Kind: obj.GetKind(), Name: obj.GetName(), Live: found}}}}, args{obj: obj}, found},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sc := &syncContext{
|
||||
compareResult: tt.fields.compareResult,
|
||||
}
|
||||
if got := sc.liveObj(tt.args.obj); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("syncContext.liveObj() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,11 +22,22 @@ Install:
|
||||
* [kubectx](https://kubectx.dev)
|
||||
* [minikube](https://kubernetes.io/docs/setup/minikube/) or Docker for Desktop
|
||||
|
||||
!!! warning "Versions"
|
||||
You will find problems generating code if you do not have the correct versions of `protoc` and `swagger`:
|
||||
|
||||
```bash
|
||||
$ protoc --version
|
||||
libprotoc 3.7.1
|
||||
~/go/src/github.com/argoproj/argo-cd (ui)
|
||||
$ swagger version
|
||||
version: v0.19.0
|
||||
```
|
||||
|
||||
Brew users can quickly install the lot:
|
||||
|
||||
```bash
|
||||
brew tap go-swagger/go-swagger
|
||||
brew install go dep protobuf kubectl kubectx ksonnet/tap/ks kubernetes-helm jq go-swagger
|
||||
brew install go dep protobuf kubectl kubectx ksonnet/tap/ks kubernetes-helm jq go-swagger kustomize
|
||||
```
|
||||
|
||||
!!! note "Kustomize"
|
||||
@@ -39,23 +50,30 @@ export GOPATH=~/go
|
||||
export PATH=$PATH:$GOPATH/bin
|
||||
```
|
||||
|
||||
Check out the code:
|
||||
|
||||
```bash
|
||||
go get -u github.com/argoproj/argo-cd
|
||||
cd ~/go/src/github.com/argoproj/argo-cd
|
||||
```
|
||||
|
||||
Install go dependencies:
|
||||
|
||||
```bash
|
||||
go get -u github.com/golang/protobuf/protoc-gen-go
|
||||
go get -u github.com/go-swagger/go-swagger/cmd/swagger
|
||||
go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
|
||||
go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
|
||||
go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||
go get -u github.com/mattn/goreman
|
||||
go get -u gotest.tools/gotestsum
|
||||
go get github.com/gobuffalo/packr/packr
|
||||
go get github.com/gogo/protobuf/gogoproto
|
||||
go get github.com/golang/protobuf/protoc-gen-go
|
||||
go get github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||
go get github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
|
||||
go get github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
|
||||
go get github.com/jstemmer/go-junit-report
|
||||
go get github.com/mattn/goreman
|
||||
go get golang.org/x/tools/cmd/goimports
|
||||
```
|
||||
|
||||
## Building
|
||||
|
||||
```bash
|
||||
go get -u github.com/argoproj/argo-cd
|
||||
dep ensure
|
||||
make
|
||||
```
|
||||
|
||||
@@ -90,15 +108,6 @@ kubectl -n argocd scale deployment.extensions/argocd-server --replicas 0
|
||||
kubectl -n argocd scale deployment.extensions/argocd-redis --replicas 0
|
||||
```
|
||||
|
||||
Then checkout and build the UI next to your code
|
||||
|
||||
```
|
||||
cd ~/go/src/github.com/argoproj
|
||||
git clone git@github.com:argoproj/argo-cd-ui.git
|
||||
```
|
||||
|
||||
Follow the UI's [README](https://github.com/argoproj/argo-cd-ui/blob/master/README.md) to build it.
|
||||
|
||||
Note: you'll need to use the https://localhost:6443 cluster now.
|
||||
|
||||
Then start the services:
|
||||
@@ -134,7 +143,7 @@ Add your username as the environment variable, e.g. to your `~/.bash_profile`:
|
||||
export IMAGE_NAMESPACE=alexcollinsintuit
|
||||
```
|
||||
|
||||
If you have not built the UI image (see [the UI README](https://github.com/argoproj/argo-cd-ui/blob/master/README.md)), then do the following:
|
||||
If you have not built the UI image (see [the UI README](https://github.com/argoproj/argo-cd/blob/master/ui/README.md)), then do the following:
|
||||
|
||||
```bash
|
||||
docker pull argoproj/argocd-ui:latest
|
||||
@@ -171,16 +180,3 @@ kubectl -n argocd scale deployment.extensions/argocd-redis --replicas 1
|
||||
```
|
||||
|
||||
Now you can set-up the port-forwarding and open the UI or CLI.
|
||||
|
||||
## Pre-commit Checks
|
||||
|
||||
Before you commit, make sure you've formatted and linted your code, or your PR will fail CI:
|
||||
|
||||
```bash
|
||||
STAGED_GO_FILES=$(git diff --cached --name-only | grep ".go$")
|
||||
|
||||
gofmt -w $STAGED_GO_FILES
|
||||
|
||||
make codegen
|
||||
make precommit ;# lint and test
|
||||
```
|
||||
|
||||
@@ -2,5 +2,5 @@
|
||||
|
||||
1. Make sure you've read [understanding the basics](understand_the_basics.md) and the [getting started guide](getting_started.md).
|
||||
2. Looked for an answer in [the frequently asked questions](faq.md).
|
||||
3. Ask a question in [the Argo CD Slack channel ⧉](https://argoproj.slack.com/messages/CASHNF6MS).
|
||||
4. [Read issues, report a bug, or request a feature ⧉](https://github.com/argoproj/argo-cd/issues)
|
||||
3. Ask a question in [the Argo CD Slack channel ⧉](https://argoproj.github.io/community/join-slack).
|
||||
4. [Read issues, report a bug, or request a feature ⧉](https://github.com/argoproj/argo-cd/issues)
|
||||
|
||||
BIN
docs/assets/application-of-applications.png
Normal file
|
After Width: | Height: | Size: 51 KiB |
BIN
docs/assets/compare-option-ignore-needs-pruning.png
Normal file
|
After Width: | Height: | Size: 39 KiB |
BIN
docs/assets/download-codegen-patch-file.png
Normal file
|
After Width: | Height: | Size: 46 KiB |
BIN
docs/assets/selective-sync.png
Normal file
|
After Width: | Height: | Size: 26 KiB |
BIN
docs/assets/sync-option-no-prune-sync-status.png
Normal file
|
After Width: | Height: | Size: 31 KiB |
BIN
docs/assets/sync-option-no-prune.png
Normal file
|
After Width: | Height: | Size: 34 KiB |
BIN
docs/assets/synchronization-button.png
Normal file
|
After Width: | Height: | Size: 10 KiB |
BIN
docs/assets/terminate-button.png
Normal file
|
After Width: | Height: | Size: 5.1 KiB |
41
docs/developer-guide/ci.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# CI
|
||||
|
||||
## Troubleshooting Builds
|
||||
|
||||
### "Check nothing has changed" step fails
|
||||
|
||||
If your PR fails the `codegen` CI step, you can either:
|
||||
|
||||
(1) Simple - download the `codegen.patch` file from CircleCI and apply it:
|
||||
|
||||

|
||||
|
||||
```bash
|
||||
git apply codegen.patch
|
||||
git commit -am "Applies codegen patch"
|
||||
```
|
||||
|
||||
(2) Advanced - if you have the tools installed (see the contributing guide), run the following:
|
||||
|
||||
```bash
|
||||
make pre-commit
|
||||
git commit -am 'Ran pre-commit checks'
|
||||
```
|
||||
|
||||
## Updating The Builder Image
|
||||
|
||||
Login to Docker Hub:
|
||||
|
||||
```bash
|
||||
docker login
|
||||
```
|
||||
|
||||
Build image:
|
||||
|
||||
```bash
|
||||
make builder-image IMAGE_NAMESPACE=argoproj IMAGE_TAG=v1.0.0
|
||||
```
|
||||
|
||||
## Public CD
|
||||
|
||||
[https://cd.apps.argoproj.io/](https://cd.apps.argoproj.io/)
|
||||
@@ -1,48 +1,107 @@
|
||||
# Releasing
|
||||
|
||||
1. Tag, build, and push argo-cd-ui
|
||||
Make sure you are logged into Docker Hub:
|
||||
|
||||
```bash
|
||||
docker login
|
||||
```
|
||||
|
||||
Export the upstream repository and branch name, e.g.:
|
||||
|
||||
```bash
|
||||
REPO=upstream ;# or origin
|
||||
BRANCH=release-1.0
|
||||
```
|
||||
|
||||
Set the `VERSION` environment variable:
|
||||
|
||||
```bash
|
||||
# release candidate
|
||||
VERSION=v1.0.0-rc1
|
||||
# GA release
|
||||
VERSION=v1.0.0
|
||||
```
|
||||
|
||||
If not already created, create UI release branch:
|
||||
|
||||
```bash
|
||||
cd argo-cd-ui
|
||||
git checkout -b release-X.Y
|
||||
git tag vX.Y.Z
|
||||
git push upstream release-X.Y --tags
|
||||
IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z DOCKER_PUSH=true yarn docker
|
||||
git checkout -b $BRANCH
|
||||
```
|
||||
|
||||
2. Create release-X.Y branch (if creating initial X.Y release)
|
||||
Tag UI:
|
||||
|
||||
```bash
|
||||
git checkout -b release-X.Y
|
||||
git push upstream release-X.Y
|
||||
git tag $VERSION
|
||||
git push $REPO $BRANCH --tags
|
||||
IMAGE_NAMESPACE=argoproj IMAGE_TAG=$VERSION DOCKER_PUSH=true yarn docker
|
||||
```
|
||||
|
||||
3. Update VERSION and manifests with new version
|
||||
If not already created, create release branch:
|
||||
|
||||
```bash
|
||||
vi VERSION # ensure value is desired X.Y.Z semantic version
|
||||
make manifests IMAGE_TAG=vX.Y.Z
|
||||
git commit -a -m "Update manifests to vX.Y.Z"
|
||||
git push upstream release-X.Y
|
||||
cd argo-cd
|
||||
git checkout -b $BRANCH
|
||||
git push $REPO $BRANCH
|
||||
```
|
||||
|
||||
4. Tag, build, and push release to docker hub
|
||||
Update `VERSION` and manifests with new version:
|
||||
|
||||
```bash
|
||||
git tag vX.Y.Z
|
||||
make release IMAGE_NAMESPACE=argoproj IMAGE_TAG=vX.Y.Z DOCKER_PUSH=true
|
||||
git push upstream vX.Y.Z
|
||||
echo ${VERSION:1} > VERSION
|
||||
make manifests IMAGE_TAG=$VERSION
|
||||
git commit -am "Update manifests to $VERSION"
|
||||
git push $REPO $BRANCH
|
||||
```
|
||||
|
||||
5. Update argocd brew formula
|
||||
Tag, build, and push the release to Docker Hub:
|
||||
|
||||
```bash
|
||||
git tag $VERSION
|
||||
make release IMAGE_NAMESPACE=argoproj IMAGE_TAG=$VERSION DOCKER_PUSH=true
|
||||
git push $REPO $VERSION
|
||||
```
|
||||
|
||||
Update [Github releases](https://github.com/argoproj/argo-cd/releases) with:
|
||||
|
||||
* Getting started (copy from previous release)
|
||||
* Changelog
|
||||
* Binaries (e.g. dist/argocd-darwin-amd64).
|
||||
|
||||
|
||||
If GA, update `stable` tag:
|
||||
|
||||
```bash
|
||||
git tag stable --force && git push $REPO stable --force
|
||||
```
|
||||
|
||||
If GA, update Brew formula:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/argoproj/homebrew-tap
|
||||
cd homebrew-tap
|
||||
./update.sh ~/go/src/github.com/argoproj/argo-cd/dist/argocd-darwin-amd64
|
||||
git commit -a -m "Update argocd to vX.Y.Z"
|
||||
git commit -a -m "Update argocd to $VERSION"
|
||||
git push
|
||||
```
|
||||
|
||||
6. Update documentation:
|
||||
* Edit CHANGELOG.md with release notes
|
||||
* Update `stable` tag
|
||||
### Verify
|
||||
|
||||
Locally:
|
||||
|
||||
```bash
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/$VERSION/manifests/install.yaml
|
||||
```
|
||||
git tag stable --force && git push upstream stable --force
|
||||
|
||||
Follow the [Getting Started Guide](../getting_started/).
|
||||
|
||||
If GA:
|
||||
|
||||
```bash
|
||||
brew upgrade argocd
|
||||
/usr/local/bin/argocd version
|
||||
```
|
||||
* Create GitHub release from new tag and upload binaries (e.g. dist/argocd-darwin-amd64)
|
||||
|
||||
Sync Argo CD in [https://cd.apps.argoproj.io/applications/argo-cd](https://cd.apps.argoproj.io/applications/argo-cd).
|
||||
|
||||
Deploy the [site](site.md).
|
||||
|
||||
@@ -17,3 +17,34 @@ You can observe the tests by using the UI [http://localhost:8080/applications](h
|
||||
|
||||
The tests are executed by an Argo Workflow defined in `.argo-ci/ci.yaml`. The CI job builds an Argo CD image, deploys the Argo CD components into a throw-away Kubernetes cluster provisioned using k3s, and runs the e2e tests against it.
|
||||
|
||||
## Test Isolation
|
||||
|
||||
Some effort has been made to balance test isolation with speed. Tests are isolated: each test gets its own resources, as sketched after this list:
|
||||
|
||||
* A random 5 character ID.
|
||||
* A unique Git repository containing the `testdata` in `/tmp/argocd-e2e/${id}`.
|
||||
* A namespace `argocd-e2e-ns-${id}`.
|
||||
* A primary name for the app, `argocd-e2e-${id}`.
|
||||
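As a rough illustration (the helper name and character set below are assumptions, not the project's actual fixture code), deriving these per-test resources might look like:

```go
package main

import (
	"fmt"
	"math/rand"
)

// randSuffix returns a random 5-character ID (hypothetical helper;
// the real fixture code may differ).
func randSuffix() string {
	const letters = "abcdefghijklmnopqrstuvwxyz"
	b := make([]byte, 5)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return string(b)
}

func main() {
	id := randSuffix()
	fmt.Println("repo:      /tmp/argocd-e2e/" + id)
	fmt.Println("namespace: argocd-e2e-ns-" + id)
	fmt.Println("app:       argocd-e2e-" + id)
}
```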
|
||||
## Troubleshooting
|
||||
|
||||
**Tests fail to delete `argocd-e2e-ns-*` namespaces.**
|
||||
|
||||
This may be due to the metrics server; run this:
|
||||
|
||||
```bash
|
||||
kubectl api-resources
|
||||
```
|
||||
|
||||
If it exits with status code 1, run:
|
||||
|
||||
```bash
|
||||
kubectl delete apiservice v1beta1.metrics.k8s.io
|
||||
```
|
||||
|
||||
Remove `/spec/finalizers` from the namespace:
|
||||
|
||||
```bash
|
||||
kubectl edit ns argocd-e2e-ns-*
|
||||
```
|
||||
@@ -56,3 +56,9 @@ KUBECONFIG=/tmp/config kubectl get pods # test connection manually
|
||||
```
|
||||
|
||||
Now you can manually verify that cluster is accessible from the Argo CD pod.
|
||||
|
||||
## How Can I Terminate A Sync?
|
||||
|
||||
To terminate the sync, click on "synchronisation" and then "terminate":
|
||||
|
||||
 
|
||||
@@ -29,6 +29,8 @@ data:
|
||||
issuer: https://dev-123456.oktapreview.com
|
||||
clientID: aaaabbbbccccddddeee
|
||||
clientSecret: $oidc.okta.clientSecret
|
||||
# Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"]
|
||||
requestedScopes: ["openid", "profile", "email"]
|
||||
|
||||
# Git repositories configure Argo CD with (optional).
|
||||
# This list is updated when configuring/removing repos from the UI/CLI
|
||||
|
||||
@@ -19,3 +19,8 @@ data:
|
||||
# authorizing API requests (optional). If omitted or empty, users may be still be able to login,
|
||||
# but will see no apps, projects, etc...
|
||||
policy.default: role:readonly
|
||||
|
||||
# scopes controls which OIDC scopes to examine during rbac enforcement (in addition to `sub` scope).
|
||||
# If omitted, defaults to: `[groups]`. The scope value can be a string, or a list of strings.
|
||||
scopes: [cognito:groups, email]
|
||||
|
||||
|
||||
82
docs/operator-manual/cluster-bootstrapping.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Cluster Bootstrapping
|
||||
|
||||
This guide is for operators who have already installed Argo CD, have a new cluster, and are looking to install many applications in that cluster.
|
||||
|
||||
There's no one particular pattern to solve this problem, e.g. you could write a script to create your applications, or you could even manually create them. However, users of Argo CD tend to use the **application of applications pattern**.
|
||||
|
||||
## Application Of Applications Pattern
|
||||
|
||||
[Declaratively](declarative-setup.md) specify one Argo CD application that consists only of other applications.
|
||||
|
||||

|
||||
|
||||
### Helm Example
|
||||
|
||||
This example shows how to use Helm to achieve this. You can, of course, use another tool if you like.
|
||||
|
||||
A typical layout of your Git repository for this might be:
|
||||
|
||||
```
|
||||
├── Chart.yaml
|
||||
├── templates
|
||||
│ ├── guestbook.yaml
|
||||
│ ├── helm-dependency.yaml
|
||||
│ ├── helm-guestbook.yaml
|
||||
│ └── kustomize-guestbook.yaml
|
||||
└── values.yaml
|
||||
```
|
||||
|
||||
`Chart.yaml` is boiler-plate.
|
||||
|
||||
`templates` contains one file for each application, roughly:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: guestbook
|
||||
namespace: argocd
|
||||
finalizers:
|
||||
- resources-finalizer.argocd.argoproj.io
|
||||
spec:
|
||||
destination:
|
||||
namespace: argocd
|
||||
server: {{ .Values.spec.destination.server }}
|
||||
project: default
|
||||
source:
|
||||
path: guestbook
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps
|
||||
targetRevision: {{ .Values.spec.source.targetRevision }}
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
```
|
||||
|
||||
In this example, I've set the sync policy to automated + prune, so that applications are automatically created, synced, and deleted when the manifest is changed, but you may wish to disable this. I've also added the finalizer, which will ensure that your applications are deleted correctly.
|
||||
|
||||
As you probably want to override the cluster server and maybe the revision, these are templated values.
|
||||
|
||||
`values.yaml` contains the default values:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
source:
|
||||
targetRevision: HEAD
|
||||
```
|
||||
|
||||
Finally, you need to create your application, e.g.:
|
||||
|
||||
```bash
|
||||
argocd app create applications \
|
||||
--dest-namespace argocd \
|
||||
--dest-server https://kubernetes.default.svc \
|
||||
--repo https://github.com/argoproj/argocd-example-apps.git \
|
||||
--path applications \
|
||||
--sync-policy automated
|
||||
```
|
||||
|
||||
In this example, I excluded auto-prune, as this would result in all applications being deleted if someone accidentally deleted the *application of applications*.
|
||||
|
||||
View [the example on Github](https://github.com/argoproj/argocd-example-apps/tree/master/applications).
|
||||
@@ -48,10 +48,12 @@ metadata:
|
||||
- resources-finalizer.argocd.argoproj.io
|
||||
```
|
||||
|
||||
### App of Apps of Apps
|
||||
### Application of Applications
|
||||
|
||||
You can create an application that creates other applications, which in turn can create other applications.
|
||||
This allows you to declaratively manage a group of applications that can be deployed and configured in concert.
|
||||
This allows you to declaratively manage a group of applications that can be deployed and configured in concert.
|
||||
|
||||
See [cluster bootstrapping](cluster-bootstrapping.md).
|
||||
|
||||
## Projects
|
||||
The AppProject CRD is the Kubernetes resource object representing a logical grouping of applications.
|
||||
@@ -150,6 +152,88 @@ data:
|
||||
key: sshPrivateKey
|
||||
```
|
||||
|
||||
!!! tip
|
||||
The Kubernetes documentation has [instructions for creating a secret containing a private key](https://kubernetes.io/docs/concepts/configuration/secret/#use-case-pod-with-ssh-keys).
|
||||
|
||||
|
||||
### Repository Credentials (v1.1+)
|
||||
|
||||
If you want to use the same credentials for multiple repositories, you can use `repository.credentials`:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
data:
|
||||
repositories: |
|
||||
- url: https://github.com/argoproj/private-repo
|
||||
- url: https://github.com/argoproj/other-private-repo
|
||||
repository.credentials: |
|
||||
- url: https://github.com/argoproj
|
||||
passwordSecret:
|
||||
name: my-secret
|
||||
key: password
|
||||
usernameSecret:
|
||||
name: my-secret
|
||||
key: username
|
||||
```
|
||||
|
||||
Argo CD will only use these credentials for a repository if that repository's entry omits the `usernameSecret`, `passwordSecret`, and `sshPrivateKeySecret` fields (`insecureIgnoreHostKey` is ignored).
|
||||
|
||||
A credential may match if its URL is a prefix of the repository's URL. This means more than one credential may match; e.g. in the above example both [https://github.com/argoproj](https://github.com/argoproj) and [https://github.com](https://github.com) would match. Argo CD selects the first one that matches.
|
||||
|
||||
!!! tip
|
||||
Order your credentials with the most specific at the top and the least specific at the bottom.
|
||||
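To make the first-match-wins rule concrete, here is a minimal Go sketch of the selection logic (the `credential` type and `firstMatching` function are illustrative assumptions, not Argo CD's internals):

```go
package main

import (
	"fmt"
	"strings"
)

type credential struct{ URL string }

// firstMatching returns the first credential whose URL is a prefix of
// repoURL, mirroring the documented first-one-wins behaviour.
func firstMatching(creds []credential, repoURL string) *credential {
	for i := range creds {
		if strings.HasPrefix(repoURL, creds[i].URL) {
			return &creds[i]
		}
	}
	return nil
}

func main() {
	creds := []credential{
		{URL: "https://github.com/argoproj"}, // most specific first
		{URL: "https://github.com"},
	}
	c := firstMatching(creds, "https://github.com/argoproj/other-private-repo")
	fmt.Println(c.URL) // https://github.com/argoproj
}
```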
|
||||
A complete example:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: argocd-cm
|
||||
data:
|
||||
repositories: |
|
||||
# this has its own credentials
|
||||
- url: https://github.com/argoproj/private-repo
|
||||
passwordSecret:
|
||||
name: private-repo-secret
|
||||
key: password
|
||||
usernameSecret:
|
||||
name: private-repo-secret
|
||||
key: username
|
||||
sshPrivateKeySecret:
|
||||
name: private-repo-secret
|
||||
key: sshPrivateKey
|
||||
- url: https://github.com/argoproj/other-private-repo
|
||||
- url: https://github.com/otherproj/another-private-repo
|
||||
repository.credentials: |
|
||||
# this will be used for the second repo
|
||||
- url: https://github.com/argoproj
|
||||
passwordSecret:
|
||||
name: other-private-repo-secret
|
||||
key: password
|
||||
usernameSecret:
|
||||
name: other-private-repo-secret
|
||||
key: username
|
||||
sshPrivateKeySecret:
|
||||
name: other-private-repo-secret
|
||||
key: sshPrivateKey
|
||||
# this will be used for the third repo
|
||||
- url: https://github.com
|
||||
passwordSecret:
|
||||
name: another-private-repo-secret
|
||||
key: password
|
||||
usernameSecret:
|
||||
name: another-private-repo-secret
|
||||
key: username
|
||||
sshPrivateKeySecret:
|
||||
name: another-private-repo-secret
|
||||
key: sshPrivateKey
|
||||
```
|
||||
|
||||
|
||||
## Clusters
|
||||
|
||||
Cluster credentials are stored in secrets in the same way as repository credentials, but do not require an entry in the `argocd-cm` config map. Each secret must have the label
|
||||
@@ -305,7 +389,7 @@ Example of `kustomization.yaml`:
|
||||
|
||||
```yaml
|
||||
bases:
|
||||
- github.com/argoproj/argo-cd//manifests/cluster-install?ref=v0.10.6
|
||||
- github.com/argoproj/argo-cd//manifests/cluster-install?ref=v1.0.1
|
||||
|
||||
# additional resources like ingress rules, cluster and repository secrets.
|
||||
resources:
|
||||
|
||||
25
docs/operator-manual/disaster_recovery.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Disaster Recovery
|
||||
|
||||
You can use `argocd-util` to import and export all Argo CD data.
|
||||
|
||||
Make sure you have `~/.kube/config` pointing to your Argo CD cluster.
|
||||
|
||||
Figure out what version of Argo CD you're running:
|
||||
|
||||
```bash
|
||||
argocd version | grep server
|
||||
# ...
|
||||
export VERSION=v1.0.1
|
||||
```
|
||||
|
||||
Export to a backup:
|
||||
|
||||
```bash
|
||||
docker run -v ~/.kube:/home/argocd/.kube --rm argoproj/argocd:$VERSION argocd-util export > backup.yaml
|
||||
```
|
||||
|
||||
Import from a backup:
|
||||
|
||||
```bash
|
||||
docker run -v ~/.kube:/home/argocd/.kube --rm argoproj/argocd:$VERSION argocd-util import - < backup.yaml
|
||||
```
|
||||
19
docs/operator-manual/high_availability.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# High Availability
|
||||
|
||||
Argo CD is largely stateless; all data is persisted as Kubernetes objects, which in turn are stored in Kubernetes' etcd. Redis is used only as a throw-away cache and can be lost; when lost, it will be rebuilt without loss of service.
|
||||
|
||||
A set of HA manifests is provided for users who wish to run Argo CD in a highly available manner. This installation runs more containers and runs Redis in HA mode.
|
||||
|
||||
[Manifests ⧉](https://github.com/argoproj/argo-cd/tree/master/manifests)
|
||||
|
||||
!!! note
|
||||
The HA installation will require at least three different nodes due to pod anti-affinity rules in the specs.
|
||||
|
||||
## Scaling Up
|
||||
|
||||
You might scale up some Argo CD services in the following circumstances:
|
||||
|
||||
* The `argocd-repo-server` can scale up when there is too much contention on a single git repo (e.g. many apps defined in a single git repo).
|
||||
* The `argocd-server` can scale up to support more front-end load.
|
||||
|
||||
All other services should run with their pre-determined number of replicas. The `argocd-application-controller` must not be scaled up, because multiple controllers would fight over the same work. The `argocd-dex-server` uses an in-memory database, so two or more instances would have inconsistent data. `argocd-redis` is pre-configured with the understanding that there are only three total Redis servers/sentinels.
|
||||
46
docs/user-guide/app_deletion.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# App Deletion
|
||||
|
||||
Apps can be deleted with or without a cascade option. A **cascade delete** deletes both the app and its resources, rather than only the app.
|
||||
|
||||
## Deletion Using `argocd`
|
||||
|
||||
To perform a non-cascade delete:
|
||||
|
||||
```bash
|
||||
argocd app delete APPNAME
|
||||
```
|
||||
|
||||
To perform a cascade delete:
|
||||
|
||||
```bash
|
||||
argocd app delete APPNAME --cascade
|
||||
```
|
||||
|
||||
## Deletion Using `kubectl`
|
||||
|
||||
To perform a non-cascade delete:
|
||||
|
||||
```bash
|
||||
kubectl delete app APPNAME
|
||||
```
|
||||
|
||||
To perform a cascade delete, set the finalizer, e.g. using `kubectl patch`:
|
||||
|
||||
```bash
|
||||
kubectl patch app APPNAME -p '{"metadata": {"finalizers": ["resources-finalizer.argocd.argoproj.io"]}}' --type merge
|
||||
kubectl delete app APPNAME
|
||||
```
|
||||
|
||||
## About The Deletion Finalizer
|
||||
|
||||
For the technical amongst you, the Argo CD application controller watches for this finalizer:
|
||||
|
||||
```yaml
|
||||
metadata:
|
||||
finalizers:
|
||||
- resources-finalizer.argocd.argoproj.io
|
||||
```
|
||||
|
||||
Argo CD's app controller watches for this and will then delete both the app and its resources.
|
||||
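As an illustration only (not the controller's actual code), checking an application object for that finalizer might look like this:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

const resourcesFinalizer = "resources-finalizer.argocd.argoproj.io"

// hasResourcesFinalizer reports whether the app carries the finalizer
// that tells the controller to cascade-delete the app's resources.
func hasResourcesFinalizer(app *unstructured.Unstructured) bool {
	for _, f := range app.GetFinalizers() {
		if f == resourcesFinalizer {
			return true
		}
	}
	return false
}

func main() {
	app := &unstructured.Unstructured{Object: map[string]interface{}{}}
	app.SetFinalizers([]string{resourcesFinalizer})
	fmt.Println(hasResourcesFinalizer(app)) // true
}
```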
|
||||
When you invoke `argocd app delete` with `--cascade`, the finalizer is added automatically.
|
||||
@@ -1,144 +1,21 @@
|
||||
# Application Source Types
|
||||
# Tools
|
||||
|
||||
Argo CD supports several different ways in which kubernetes manifests can be defined:
|
||||
## Production
|
||||
|
||||
* **[Ksonnet](https://ksonnet.io)** applications
|
||||
* **[Kustomize](https://kustomize.io)** applications
|
||||
* **[Helm](https://helm.sh)** charts
|
||||
* **Directory** of YAML/json/jsonnet manifests
|
||||
* Any custom config management tool configured as a config management plugin
|
||||
Argo CD supports several different ways in which Kubernetes manifests can be defined:
|
||||
|
||||
Some additional considerations should be made when deploying apps of a particular type:
|
||||
* [Kustomize](kustomize.md) applications
|
||||
* [Helm](helm.md) charts
|
||||
* [Ksonnet](ksonnet.md) applications
|
||||
* A directory of YAML/JSON/Jsonnet manifests
|
||||
* Any [custom config management tool](config-management-plugins.md) configured as a config management plugin
|
||||
|
||||
## Kustomize
|
||||
|
||||
Oops. We haven't got around to writing this part yet.
|
||||
|
||||
## Ksonnet
|
||||
|
||||
### Environments
|
||||
Ksonnet has a first class concept of an "environment." To create an application from a ksonnet
|
||||
app directory, an environment must be specified. For example, the following command creates the
|
||||
"guestbook-default" app, which points to the `default` environment:
|
||||
## Development
|
||||
Argo CD also supports uploading local manifests directly. Since this is an anti-pattern of the
|
||||
GitOps paradigm, this should only be done for development purposes. A user with an `override` permission is required
|
||||
to upload manifests locally (typically an admin). All of the different Kubernetes deployment tools above are supported.
|
||||
To upload a local application:
|
||||
|
||||
```bash
|
||||
argocd app create guestbook-default --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --env default
|
||||
$ argocd app sync APPNAME --local /path/to/dir/
|
||||
```
|
||||
|
||||
### Parameters
|
||||
Ksonnet parameters all belong to a component. For example, the following are the parameters
|
||||
available in the guestbook app, all of which belong to the `guestbook-ui` component:
|
||||
|
||||
```bash
|
||||
$ ks param list
|
||||
COMPONENT PARAM VALUE
|
||||
========= ===== =====
|
||||
guestbook-ui containerPort 80
|
||||
guestbook-ui image "gcr.io/heptio-images/ks-guestbook-demo:0.1"
|
||||
guestbook-ui name "guestbook-ui"
|
||||
guestbook-ui replicas 1
|
||||
guestbook-ui servicePort 80
|
||||
guestbook-ui type "LoadBalancer"
|
||||
```
|
||||
|
||||
When overriding ksonnet parameters in Argo CD, the component name should also be specified in the
|
||||
`argocd app set` command, in the form of `-p COMPONENT=PARAM=VALUE`. For example:
|
||||
|
||||
```bash
|
||||
argocd app set guestbook-default -p guestbook-ui=image=gcr.io/heptio-images/ks-guestbook-demo:0.1
|
||||
```
|
||||
|
||||
## Helm
|
||||
|
||||
### Values Files
|
||||
|
||||
Helm has the ability to use a different, or even multiple "values.yaml" files to derive its
|
||||
parameters from. Alternate or multiple values file(s), can be specified using the `--values`
|
||||
flag. The flag can be repeated to support multiple values files:
|
||||
|
||||
```bash
|
||||
argocd app set helm-guestbook --values values-production.yaml
|
||||
```
|
||||
|
||||
### Helm Parameters
|
||||
|
||||
Helm has the ability to set parameter values, which override any values in
|
||||
a `values.yaml`. For example, `service.type` is a common parameter which is exposed in a Helm chart:
|
||||
|
||||
```bash
|
||||
helm template . --set service.type=LoadBalancer
|
||||
```
|
||||
|
||||
Similarly Argo CD can override values in the `values.yaml` parameters using `argo app set` command,
|
||||
in the form of `-p PARAM=VALUE`. For example:
|
||||
|
||||
```bash
|
||||
argocd app set helm-guestbook -p service.type=LoadBalancer
|
||||
```
|
||||
|
||||
### Helm Hooks
|
||||
|
||||
Helm hooks are equivalent in concept to [Argo CD resource hooks](resource_hooks.md). In helm, a hook
|
||||
is any normal kubernetes resource annotated with the `helm.sh/hook` annotation. When Argo CD deploys
|
||||
helm application which contains helm hooks, all helm hook resources are currently ignored during
|
||||
the `kubectl apply` of the manifests. There is an
|
||||
[open issue](https://github.com/argoproj/argo-cd/issues/355) to map Helm hooks to Argo CD's concept
|
||||
of Pre/Post/Sync hooks.

### Random Data

Helm templating has the ability to generate random data during chart rendering via the
`randAlphaNum` function. Many helm charts from the [charts repository](https://github.com/helm/charts)
make use of this feature. For example, the following is the secret for the
[redis helm chart](https://github.com/helm/charts/blob/master/stable/redis/templates/secret.yaml):

```yaml
data:
  {{- if .Values.password }}
  redis-password: {{ .Values.password | b64enc | quote }}
  {{- else }}
  redis-password: {{ randAlphaNum 10 | b64enc | quote }}
  {{- end }}
```

The Argo CD application controller periodically compares Git state against the live state, running
the `helm template <CHART>` command to generate the helm manifests. Because the random value is
regenerated every time the comparison is made, any application which makes use of the `randAlphaNum`
function will always be in an `OutOfSync` state. This can be mitigated by explicitly setting a
value in the values.yaml, such that the value is stable between each comparison. For example:

```bash
argocd app set redis -p password=abc123
```

## Config Management Plugins

Argo CD allows integrating more config management tools using config management plugins. The following changes are required to configure a new plugin:

* Make sure the required binaries are available in the `argocd-repo-server` pod. The binaries can be added via volume mounts or by using a custom image (see [custom_tools](../operator-manual/custom_tools.md)).
* Register a new plugin in the `argocd-cm` ConfigMap:

```yaml
data:
  configManagementPlugins: |
    - name: pluginName
      init:                          # Optional command to initialize application source directory
        command: ["sample command"]
        args: ["sample args"]
      generate:                      # Command to generate manifests YAML
        command: ["sample command"]
        args: ["sample args"]
```

The `generate` command must print a valid YAML stream to stdout. Both the `init` and `generate` commands are executed inside the application source directory.
Commands have access to system environment variables and the following additional variables:

`ARGOCD_APP_NAME` - the name of the application; `ARGOCD_APP_NAMESPACE` - the destination namespace of the application

* Create an application and specify the required config management plugin name.

```bash
argocd app create <appName> --config-management-plugin <pluginName>
```

More config management plugin examples are available in [argocd-example-apps](https://github.com/argoproj/argocd-example-apps/tree/master/plugins).
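
As a concrete sketch, a hypothetical plugin that renders plain manifests with environment-variable substitution could be registered as follows (the plugin name and the use of `envsubst` are illustrative, and the binary would need to be present in the `argocd-repo-server` image as described above):

```yaml
data:
  configManagementPlugins: |
    - name: envsubst-example                  # hypothetical plugin name
      generate:
        command: ["sh", "-c"]
        args: ["envsubst < manifests.yaml"]   # prints substituted YAML to stdout
```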

@@ -24,7 +24,7 @@ from your application source code, is highly recommended for the following reaso
unintentionally. By having separate repos, commit access can be given to the source code repo,
and not the application config repo.

-5. If you are automating your CI pipeline, pushing manifest changes to the same Cit repository can
+5. If you are automating your CI pipeline, pushing manifest changes to the same Git repository can
trigger an infinite loop of build jobs and Git commit triggers. Having a separate repo to push
config changes to, prevents this from happening.

34
docs/user-guide/compare-options.md
Normal file
@@ -0,0 +1,34 @@
# Compare Options

## Ignoring Resources That Are Extraneous

You may wish to exclude resources from the app's overall sync status under certain circumstances, e.g. if they are generated by a tool. This can be done by adding this annotation:

```yaml
metadata:
  annotations:
    argocd.argoproj.io/compare-options: IgnoreExtraneous
```

![compare option](assets/compare-option.png)

!!! note
    This only affects the sync status. If the resource's health is degraded, then the app will also be degraded.

Kustomize has a feature that allows you to generate config maps ([read more ⧉](https://github.com/kubernetes-sigs/kustomize/blob/master/examples/configGeneration.md)). You can set `generatorOptions` to add this annotation so that your app remains in sync:

```yaml
configMapGenerator:
- name: my-map
  literals:
  - foo=bar
generatorOptions:
  annotations:
    argocd.argoproj.io/compare-options: IgnoreExtraneous
kind: Kustomization
```

!!! note
    `generatorOptions` adds annotations to both config maps and secrets ([read more ⧉](https://github.com/kubernetes-sigs/kustomize/blob/master/examples/generatorOptions.md)).

You may wish to combine this with the [`Prune=false` sync option](sync-options.md).
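
For instance, a generated resource that should neither affect the sync status nor ever be pruned could carry both annotations:

```yaml
metadata:
  annotations:
    argocd.argoproj.io/compare-options: IgnoreExtraneous
    argocd.argoproj.io/sync-options: Prune=false
```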

31
docs/user-guide/config-management-plugins.md
Normal file
@@ -0,0 +1,31 @@
# Plugins

Argo CD allows integrating more config management tools using config management plugins. The following changes are required to configure a new plugin:

* Make sure the required binaries are available in the `argocd-repo-server` pod. The binaries can be added via volume mounts or by using a custom image (see [custom_tools](../operator-manual/custom_tools.md)).
* Register a new plugin in the `argocd-cm` ConfigMap:

```yaml
data:
  configManagementPlugins: |
    - name: pluginName
      init:                          # Optional command to initialize application source directory
        command: ["sample command"]
        args: ["sample args"]
      generate:                      # Command to generate manifests YAML
        command: ["sample command"]
        args: ["sample args"]
```

The `generate` command must print a valid YAML stream to stdout. Both the `init` and `generate` commands are executed inside the application source directory.
Commands have access to system environment variables and the following additional variables:

`ARGOCD_APP_NAME` - the name of the application; `ARGOCD_APP_NAMESPACE` - the destination namespace of the application

* Create an application and specify the required config management plugin name.

```bash
argocd app create <appName> --config-management-plugin <pluginName>
```

More config management plugin examples are available in [argocd-example-apps](https://github.com/argoproj/argocd-example-apps/tree/master/plugins).

89
docs/user-guide/helm.md
Normal file
@@ -0,0 +1,89 @@
# Helm

## Values Files

Helm has the ability to use a different, or even multiple, "values.yaml" files to derive its
parameters from. Alternate or multiple values files can be specified using the `--values`
flag. The flag can be repeated to support multiple values files:

```bash
argocd app set helm-guestbook --values values-production.yaml
```

## Helm Parameters

Helm has the ability to set parameter values, which override any values in
a `values.yaml`. For example, `service.type` is a common parameter which is exposed in a Helm chart:

```bash
helm template . --set service.type=LoadBalancer
```

Similarly, Argo CD can override values in the `values.yaml` using the `argocd app set` command,
in the form of `-p PARAM=VALUE`. For example:

```bash
argocd app set helm-guestbook -p service.type=LoadBalancer
```

## Helm Release Name

By default, the Helm release name is equal to the Application name to which it belongs. Sometimes, especially on a centralised Argo CD,
you may want to override that name. This is possible with the `--release-name` flag on the CLI:

```bash
argocd app set helm-guestbook --release-name myRelease
```

or by setting the `releaseName` in YAML:

```yaml
source:
  helm:
    releaseName: myRelease
```

```diff
- Important notice on overriding the release name
```
Please note that overriding the Helm release name might cause problems when the chart you are deploying uses the `app.kubernetes.io/instance` label.
Argo CD injects this label with the value of the Application name for tracking purposes, so when the release name is overridden, it will no longer
be equal to the Application name. Because Argo CD will overwrite the label with the Application name, some selectors on the resources may
stop working. To avoid this, you can configure Argo CD to use another label for tracking in the [Argo CD configmap argocd-cm.yaml](./../operator-manual/argocd-cm.yaml) -
check the lines describing `application.instanceLabelKey`.
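
A minimal sketch of that setting in the `argocd-cm` ConfigMap (the label key shown is only an example; choose one owned by your organisation):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
data:
  application.instanceLabelKey: mycompany.com/instance   # example tracking label key
```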

## Helm Hooks

Helm hooks are equivalent in concept to [Argo CD resource hooks](resource_hooks.md). In helm, a hook
is any normal kubernetes resource annotated with the `helm.sh/hook` annotation. When Argo CD deploys a
helm application which contains helm hooks, all helm hook resources are currently ignored during
the `kubectl apply` of the manifests. There is an
[open issue](https://github.com/argoproj/argo-cd/issues/355) to map Helm hooks to Argo CD's concept
of Pre/Post/Sync hooks.

## Random Data

Helm templating has the ability to generate random data during chart rendering via the
`randAlphaNum` function. Many helm charts from the [charts repository](https://github.com/helm/charts)
make use of this feature. For example, the following is the secret for the
[redis helm chart](https://github.com/helm/charts/blob/master/stable/redis/templates/secret.yaml):

```yaml
data:
  {{- if .Values.password }}
  redis-password: {{ .Values.password | b64enc | quote }}
  {{- else }}
  redis-password: {{ randAlphaNum 10 | b64enc | quote }}
  {{- end }}
```

The Argo CD application controller periodically compares Git state against the live state, running
the `helm template <CHART>` command to generate the helm manifests. Because the random value is
regenerated every time the comparison is made, any application which makes use of the `randAlphaNum`
function will always be in an `OutOfSync` state. This can be mitigated by explicitly setting a
value in the values.yaml, such that the value is stable between each comparison. For example:

```bash
argocd app set redis -p password=abc123
```

35
docs/user-guide/ksonnet.md
Normal file
@@ -0,0 +1,35 @@
# Ksonnet

## Environments
Ksonnet has a first class concept of an "environment." To create an application from a ksonnet
app directory, an environment must be specified. For example, the following command creates the
"guestbook-default" app, which points to the `default` environment:

```bash
argocd app create guestbook-default --repo https://github.com/argoproj/argocd-example-apps.git --path guestbook --env default
```

## Parameters
Ksonnet parameters all belong to a component. For example, the following are the parameters
available in the guestbook app, all of which belong to the `guestbook-ui` component:

```bash
$ ks param list
COMPONENT    PARAM          VALUE
=========    =====          =====
guestbook-ui containerPort  80
guestbook-ui image          "gcr.io/heptio-images/ks-guestbook-demo:0.1"
guestbook-ui name           "guestbook-ui"
guestbook-ui replicas       1
guestbook-ui servicePort    80
guestbook-ui type           "LoadBalancer"
```

When overriding ksonnet parameters in Argo CD, the component name should also be specified in the
`argocd app set` command, in the form of `-p COMPONENT=PARAM=VALUE`. For example:

```bash
argocd app set guestbook-default -p guestbook-ui=image=gcr.io/heptio-images/ks-guestbook-demo:0.1
```

24
docs/user-guide/kustomize.md
Normal file
@@ -0,0 +1,24 @@
# Kustomize

!!! warning "Kustomize 1 vs 2"
    Argo CD supports both versions, and auto-detects them by looking for the `apiVersion`/`kind` in `kustomization.yaml`.
    You're probably using version 2 now, so make sure you have those fields.

You have three configuration options for Kustomize (see the sketch after this list):

* `namePrefix` is a prefix prepended to resources for Kustomize apps
* `imageTags` is a list of Kustomize 1.0 image tag overrides
* `images` is a list of Kustomize 2.0 image overrides
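
A minimal sketch of these fields in an Application spec, assuming a Kustomize 2 app (the values, and the exact image-override string format, are illustrative):

```yaml
source:
  kustomize:
    namePrefix: prod-          # prepended to resource names
    images:
    - gcr.io/heptio-images/ks-guestbook-demo:0.2   # example image override
```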

To use Kustomize with an overlay, point your path to the overlay.

!!! tip
    If you're generating resources, you should read up on how to ignore those generated resources using the [`IgnoreExtraneous` compare option](compare-options.md).

## Private Remote Bases

If you have remote bases that are either (a) HTTPS and need a username/password or (b) SSH and need an SSH private key, then they'll inherit those credentials from the app's repo.

This will work if the remote bases use the same credentials/private key. It will not work if they use different ones. For security reasons, your app only ever knows about its own repo (not other teams' or users' repos), and so you won't be able to access other private repos, even if Argo CD knows about them.

Read more about [private repos](private-repositories.md).
@@ -4,7 +4,7 @@

If application manifests are located in a private repository, then repository credentials have to be configured. Argo CD supports both HTTP and SSH Git credentials.

-### HTTP Username And Password Credential
+### HTTPS Username And Password Credential

Private repositories that require a username and password typically have a URL that starts with "https://" rather than "git@" or "ssh://".
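
Such a repository can be added together with its credentials; a sketch with placeholder values:

```bash
argocd repo add https://github.com/argoproj/argocd-example-apps --username <username> --password <password>
```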

@@ -41,30 +41,34 @@ The Argo CD UI don't support configuring SSH credentials. The SSH credentials ca
argocd repo add git@github.com:argoproj/argocd-example-apps.git --ssh-private-key-path ~/.ssh/id_rsa
```

-## Self-Signed Certificates
+## Self-signed & Untrusted TLS Certificates

-If you are using self-hosted Git hosting service with the self-signed certificate then you need to disable certificate validation for that Git host.
-Following options are available:
+We do not currently have first-class support for this. See [#1513](https://github.com/argoproj/argo-cd/issues/1513).

-Add repository using Argo CD CLI and `--insecure-ignore-host-key` flag:
+As a work-around, you can customize your Argo CD image. See [#1344](https://github.com/argoproj/argo-cd/issues/1344#issuecomment-479811810)

+## Unknown SSH Hosts

-```bash
-argocd repo add git@github.com:argoproj/argocd-example-apps.git --ssh-private-key-path ~/.ssh/id_rsa
-```
+If you are using a privately hosted Git service over SSH, then you have the following options:

-The flag disables certificate validation only for specified repository.

-!!! warning
-    The `--insecure-ignore-host-key` flag does not work for HTTPS Git URLs. See [#1513](https://github.com/argoproj/argo-cd/issues/1513).

-You can add Git service hostname to the `/etc/ssh/ssh_known_hosts` in each Argo CD deployment and disables cert validation for Git SSL URLs. For more information see
-[example](https://github.com/argoproj/argo-cd/tree/master/examples/known-hosts) which demonstrates how `/etc/ssh/ssh_known_hosts` can be customized.
+(1) You can customize the Argo CD Docker image by adding the host's SSH public key to `/etc/ssh/ssh_known_hosts`. Additional entries to this file can be generated using the `ssh-keyscan` utility (e.g. `ssh-keyscan your-private-git-server.com`). For more information see the [example](https://github.com/argoproj/argo-cd/tree/master/examples/known-hosts) which demonstrates how `/etc/ssh/ssh_known_hosts` can be customized.

!!! note
    The `/etc/ssh/ssh_known_hosts` should include the Git host on each Argo CD deployment as well as on the computer where `argocd repo add` is executed. After resolving issue
    [#1514](https://github.com/argoproj/argo-cd/issues/1514) only the `argocd-repo-server` deployment has to be customized.

+(2) Add the repository using the Argo CD CLI and the `--insecure-ignore-host-key` flag:

+```bash
+argocd repo add git@github.com:argoproj/argocd-example-apps.git --ssh-private-key-path ~/.ssh/id_rsa --insecure-ignore-host-key
+```

+!!! warning "Don't use in production"
+    The `--insecure-ignore-host-key` flag should not be used in production as it is subject to man-in-the-middle attacks.

+!!! warning "This does not work for Kustomize remote bases or custom plugins"
+    For Kustomize support, see [#827](https://github.com/argoproj/argo-cd/issues/827).

## Declarative Configuration

See [declarative setup](../operator-manual/declarative-setup#Repositories)

@@ -1,5 +1,8 @@
# Resource Hooks

+!!! warning
+    Helm hooks are currently ignored. [Read more](helm.md).

## Overview

Synchronization can be configured using resource hooks. Hooks are ways to interject custom logic before, during,
@@ -40,6 +43,10 @@ The following hooks are defined:
| `PostSync` | Executes after all `Sync` hooks completed and were successful, a successful apply, and all resources in a `Healthy` state. |


+## Selective Sync

+Hooks are run during [selective sync](selective_sync.md).

## Hook Deletion Policies

Hooks can be deleted in an automatic fashion using the annotation: `argocd.argoproj.io/hook-delete-policy`.
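
A sketch of what this looks like on a hook resource, assuming the `HookSucceeded` policy value (which deletes the hook once it has succeeded):

```yaml
metadata:
  annotations:
    argocd.argoproj.io/hook: PostSync
    argocd.argoproj.io/hook-delete-policy: HookSucceeded
```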

9
docs/user-guide/selective_sync.md
Normal file
@@ -0,0 +1,9 @@
# Selective Sync

A *selective sync* is one where only some resources are sync'd. You can choose which resources from the UI:

![selective sync](assets/selective-sync.png)

When doing so, bear in mind:

* Your sync is not recorded in the history, and so rollback is not possible.

24
docs/user-guide/sync-options.md
Normal file
@@ -0,0 +1,24 @@
# Sync Options

## No Prune Resources

You may wish to prevent an object from being pruned:

```yaml
metadata:
  annotations:
    argocd.argoproj.io/sync-options: Prune=false
```

In the UI, the pod will simply appear as out-of-sync:

![sync option no prune](assets/sync-option-no-prune.png)


The sync-status panel shows that pruning was skipped, and why:

![sync option no prune sync status](assets/sync-option-no-prune-sync-status.png)

!!! note
    The app will be out of sync if Argo CD expects a resource to be pruned. You may wish to use this along with [compare options](compare-options.md).

48
docs/user-guide/sync-waves.md
Normal file
@@ -0,0 +1,48 @@
# Sync Phases and Waves

<iframe width="560" height="315" src="https://www.youtube.com/embed/zIHe3EVp528" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

Argo CD executes a sync operation in a number of steps. At a high level, there are three phases: *pre-sync*, *sync* and *post-sync*.

Within each phase you can have one or more waves, which allow you to ensure certain resources are healthy before subsequent resources are synced.

## How Do I Configure Phases?

Pre-sync and post-sync can only contain hooks. Apply the hook annotation:

```yaml
metadata:
  annotations:
    argocd.argoproj.io/hook: PreSync
```

[Read more about hooks](resource_hooks.md).

## How Do I Configure Waves?

Specify the wave using the following annotation:

```yaml
metadata:
  annotations:
    argocd.argoproj.io/sync-wave: "5"
```

Hooks and resources are assigned to wave zero by default. The wave can be negative, so you can create a wave that runs before all other resources.
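
For example, a resource that must be ready before everything else could use a negative wave:

```yaml
metadata:
  annotations:
    argocd.argoproj.io/sync-wave: "-1"
```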

## How Does It Work?

When Argo CD starts a sync, it orders the resources in the following precedence:

* The phase
* The wave they are in (lower values first)
* By kind (e.g. namespaces first)
* By name

It then determines the number of the next wave to apply: the first wave in which any resource is out-of-sync or unhealthy.

It applies the resources in that wave.

It repeats this process until all phases and waves are in sync and healthy.

Because an application can have resources that are unhealthy in the first wave, it may be that the app can never become healthy.

@@ -10,3 +10,7 @@ func CheckError(err error) {
		log.Fatal(err)
	}
}
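// FailOnErr discards the first return value and fatally exits if err is non-nil.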
func FailOnErr(_ interface{}, err error) {
	CheckError(err)
}

@@ -10,3 +10,10 @@ for Git repositories connected using SSL urls:
!!! note
    The `/etc/ssh/ssh_known_hosts` should include the Git host on each Argo CD deployment as well as on the computer where `argocd repo add` is executed. After resolving issue
    [#1514](https://github.com/argoproj/argo-cd/issues/1514) only the `argocd-repo-server` deployment has to be customized.

For the known_hosts file to work with a custom repository port, you have to obtain the public key using `ssh-keyscan` and hash the file before adding it to the configmap, e.g.:
```bash
ssh-keyscan -p 1234 git.repo.com > known_hosts
ssh-keygen -Hf known_hosts
cat known_hosts
```

@@ -9,6 +9,11 @@ set -o errexit
set -o nounset
set -o pipefail

+# output tool versions
+protoc --version
+swagger version
+jq --version

PROJECT_ROOT=$(cd $(dirname ${BASH_SOURCE})/..; pwd)
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${PROJECT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
PATH="${PROJECT_ROOT}/dist:${PATH}"
@@ -58,7 +63,7 @@ go build -i -o dist/protoc-gen-grpc-gateway ./vendor/github.com/grpc-ecosystem/g
go build -i -o dist/protoc-gen-swagger ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger

# Generate server/<service>/(<service>.pb.go|<service>.pb.gw.go)
-PROTO_FILES=$(find $PROJECT_ROOT \( -name "*.proto" -and -path '*/server/*' -or -path '*/reposerver/*' -and -name "*.proto" \))
+PROTO_FILES=$(find $PROJECT_ROOT \( -name "*.proto" -and -path '*/server/*' -or -path '*/reposerver/*' -and -name "*.proto" \) | sort)
for i in ${PROTO_FILES}; do
    # Path to the google API gateway annotations.proto will be different depending if we are
    # building natively (e.g. from workspace) vs. part of a docker build.
@@ -116,7 +121,7 @@ clean_swagger() {
    /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete
}

-collect_swagger server 24
+collect_swagger server 26
clean_swagger server
clean_swagger reposerver
clean_swagger controller