Mirror of https://github.com/argoproj/argo-cd.git, synced 2026-02-20 17:48:47 +01:00

Compare commits: 380 commits, release-2. ... v2.9.2
| Author | SHA1 | Date |
|---|---|---|

(The commit list carries only SHA1 hashes, from `c5ea5c4df5` through `97906bb930`; the author, date, and message columns are empty in the mirror export.)
.github/pull_request_template.md (vendored), 6 changed lines

@@ -1,6 +1,8 @@
+<!--
Note on DCO:

If the DCO action in the integration test fails, one or more of your commits are not signed off. Please click on the *Details* link next to the DCO action for instructions on how to resolve this.
+-->

Checklist:

@@ -14,8 +16,8 @@ Checklist:
* [ ] Optional. My organization is added to USERS.md.
* [ ] I have signed off all my commits as required by [DCO](https://github.com/argoproj/argoproj/blob/master/community/CONTRIBUTING.md#legal)
* [ ] I have written unit and/or e2e tests for my change. PRs without these are unlikely to be merged.
-* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).
+* [ ] My build is green ([troubleshooting builds](https://argo-cd.readthedocs.io/en/latest/developer-guide/ci/)).
* [ ] My new feature complies with the [feature status](https://github.com/argoproj/argoproj/blob/master/community/feature-status.md) guidelines.
* [ ] I have added a brief description of why this PR is necessary and/or what this PR solves.

-Please see [Contribution FAQs](https://argo-cd.readthedocs.io/en/latest/developer-guide/faq/) if you have questions about your pull-request.
+<!-- Please see [Contribution FAQs](https://argo-cd.readthedocs.io/en/latest/developer-guide/faq/) if you have questions about your pull-request. -->
.github/workflows/README.md (vendored), 2 changed lines

@@ -16,7 +16,7 @@
## image-reuse.yaml

- The resuable workflow can be used to publish or build images with multiple container registries(Quay,GHCR, dockerhub), and then sign them with cosign when an image is published.
-- A GO version `must` be specified e.g. 1.19
+- A GO version `must` be specified e.g. 1.21
- The image name for each registry *must* contain the tag. Note: multiple tags are allowed for each registry using a CSV type.
- Multiple platforms can be specified e.g. linux/amd64,linux/arm64
- Images are not published by default. A boolean value must be set to `true` to push images.
.github/workflows/ci-build.yaml (vendored), 68 changed lines

@@ -13,7 +13,7 @@ on:
env:
# Golang version to use across CI steps
-GOLANG_VERSION: '1.20'
+GOLANG_VERSION: '1.21'

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -28,9 +28,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -46,13 +46,13 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
-uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -70,16 +70,16 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
-uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0
+uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
with:
-version: v1.51.0
-args: --timeout 10m --exclude SA5011 --verbose
+version: v1.54.0
+args: --enable gofmt --timeout 10m --exclude SA5011 --verbose --max-issues-per-linter 0 --max-same-issues 0

test-go:
name: Run unit tests for Go packages
@@ -93,11 +93,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -117,7 +117,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
-uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -138,12 +138,12 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
-uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
-uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: test-results
path: test-results/
@@ -160,11 +160,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -184,7 +184,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
-uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -205,7 +205,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
-uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: race-results
path: test-results/
@@ -215,9 +215,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
@@ -263,14 +263,14 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Setup NodeJS
-uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
+uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
with:
-node-version: '20.3.1'
+node-version: '20.7.0'
- name: Restore node dependency cache
id: cache-dependencies
-uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -300,12 +300,12 @@ jobs:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
-uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -361,7 +361,7 @@ jobs:
runs-on: ubuntu-22.04
strategy:
matrix:
-k3s-version: [v1.27.2, v1.26.0, v1.25.4, v1.24.3]
+k3s-version: [v1.28.2, v1.27.6, v1.26.9, v1.25.14]
needs:
- build-go
env:
@@ -379,9 +379,9 @@ jobs:
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
@@ -400,7 +400,7 @@ jobs:
sudo chmod go-r $HOME/.kube/config
kubectl version
- name: Restore go build cache
-uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -426,7 +426,7 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
-docker pull ghcr.io/dexidp/dex:v2.36.0
+docker pull ghcr.io/dexidp/dex:v2.37.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.11-alpine
- name: Create target directory for binaries in the build-process
@@ -456,7 +456,7 @@ jobs:
set -x
make test-e2e-local
- name: Upload e2e-server logs
-uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
.github/workflows/codeql.yml (vendored), 2 changed lines

@@ -30,7 +30,7 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
.github/workflows/image-reuse.yaml (vendored), 20 changed lines

@@ -58,28 +58,28 @@ jobs:
image-digest: ${{ steps.image.outputs.digest }}
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
if: ${{ github.ref_type == 'tag'}}

- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
if: ${{ github.ref_type != 'tag'}}

- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
with:
go-version: ${{ inputs.go-version }}

- name: Install cosign
-uses: sigstore/cosign-installer@d13028333d784fcc802b67ec924bcebe75aa0a5f # v3.1.0
+uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
with:
-cosign-release: 'v2.0.0'
+cosign-release: 'v2.0.2'

- uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
-- uses: docker/setup-buildx-action@ecf95283f03858871ff00b787d79c419715afc34 # v2.7.0
+- uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0

- name: Setup tags for container image as a CSV type
run: |
@@ -135,6 +135,14 @@ jobs:
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV

+- name: Free Disk Space (Ubuntu)
+uses: jlumbroso/free-disk-space@4d9e71b726748f254fe64fa44d273194bd18ec91
+with:
+large-packages: false
+docker-images: false
+swap-storage: false
+tool-cache: false
+
- name: Build and push container image
id: image
uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 #v4.1.1
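The image metadata step above derives GIT_TREE_STATE from whether `git status --porcelain` prints anything. A rough Go equivalent of that shell test, shown for illustration only (the helper name is not from the repository):

```go
package example

import (
	"os/exec"
	"strings"
)

// gitTreeState mirrors the workflow's shell check: an empty porcelain status
// means the checkout is "clean", anything else marks it "dirty".
func gitTreeState() (string, error) {
	out, err := exec.Command("git", "status", "--porcelain").Output()
	if err != nil {
		return "", err
	}
	if strings.TrimSpace(string(out)) == "" {
		return "clean", nil
	}
	return "dirty", nil
}
```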
.github/workflows/image.yaml (vendored), 8 changed lines

@@ -25,7 +25,7 @@ jobs:
image-tag: ${{ steps.image.outputs.tag}}
platforms: ${{ steps.platforms.outputs.platforms }}
steps:
-- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0

- name: Set image tag for ghcr
run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
@@ -52,7 +52,7 @@ jobs:
uses: ./.github/workflows/image-reuse.yaml
with:
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
-go-version: 1.20
+go-version: 1.21
platforms: ${{ needs.set-vars.outputs.platforms }}
push: false
@@ -68,7 +68,7 @@ jobs:
quay_image_name: quay.io/argoproj/argocd:latest
ghcr_image_name: ghcr.io/argoproj/argo-cd/argocd:${{ needs.set-vars.outputs.image-tag }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
-go-version: 1.20
+go-version: 1.21
platforms: ${{ needs.set-vars.outputs.platforms }}
push: true
secrets:
@@ -104,7 +104,7 @@ jobs:
if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }}
runs-on: ubuntu-22.04
steps:
-- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0
+- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments"
env:
TOKEN: ${{ secrets.TOKEN }}
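The `Set image tag for ghcr` step builds the tag from the repository's VERSION file plus the first eight characters of the commit SHA. A minimal Go sketch of the same construction, assuming the VERSION file and the GITHUB_SHA variable that GitHub Actions provides; it is not code from the repository:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Equivalent of `cat ./VERSION` in the workflow step.
	version, err := os.ReadFile("VERSION")
	if err != nil {
		panic(err)
	}
	// ${GITHUB_SHA::8} keeps the first 8 characters of the commit SHA.
	sha := os.Getenv("GITHUB_SHA")
	if len(sha) > 8 {
		sha = sha[:8]
	}
	// The workflow writes this key=value pair to $GITHUB_OUTPUT.
	fmt.Printf("tag=%s-%s\n", strings.TrimSpace(string(version)), sha)
}
```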
.github/workflows/init-release.yaml (vendored), 2 changed lines

@@ -23,7 +23,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/release.yaml (vendored), 67 changed lines

@@ -10,7 +10,7 @@ on:
permissions: {}

env:
-GOLANG_VERSION: '1.20' # Note: go-version must also be set in job argocd-image.with.go-version
+GOLANG_VERSION: '1.21' # Note: go-version must also be set in job argocd-image.with.go-version

jobs:
argocd-image:
@@ -23,7 +23,7 @@ jobs:
with:
quay_image_name: quay.io/argoproj/argocd:${{ github.ref_name }}
# Note: cannot use env variables to set go-version (https://docs.github.com/en/actions/using-workflows/reusing-workflows#limitations)
-go-version: 1.20
+go-version: 1.21
platforms: linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
push: true
secrets:
@@ -38,7 +38,7 @@ jobs:
packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues)
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
if: github.repository == 'argoproj/argo-cd'
-uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0
+uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0
with:
image: quay.io/argoproj/argocd
digest: ${{ needs.argocd-image.outputs.image-digest }}
@@ -59,7 +59,7 @@ jobs:
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -77,7 +77,7 @@ jobs:
fi

- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}

@@ -88,7 +88,7 @@ jobs:
echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV

- name: Run GoReleaser
-uses: goreleaser/goreleaser-action@336e29918d653399e599bfca99fadc1d7ffbc9f7 # v4.3.0
+uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0
id: run-goreleaser
with:
version: latest
@@ -120,39 +120,35 @@ jobs:
contents: write # Needed for release uploads
if: github.repository == 'argoproj/argo-cd'
# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
-uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0
+uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
with:
base64-subjects: "${{ needs.goreleaser.outputs.hashes }}"
provenance-name: "argocd-cli.intoto.jsonl"
upload-assets: true

generate-sbom:
-name: Create Sbom and sign assets
+name: Create SBOM and generate hash
needs:
- argocd-image
- goreleaser
permissions:
contents: write # Needed for release uploads
-id-token: write # Needed for signing Sbom
+outputs:
+hashes: ${{ steps.sbom-hash.outputs.hashes}}
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}

- name: Setup Golang
-uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
+uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
with:
go-version: ${{ env.GOLANG_VERSION }}

-- name: Install cosign
-uses: sigstore/cosign-installer@d13028333d784fcc802b67ec924bcebe75aa0a5f # v3.1.0
-with:
-cosign-release: 'v2.0.0'
-
- name: Generate SBOM (spdx)
id: spdx-builder
env:
@@ -182,23 +178,38 @@ jobs:
fi

cd /tmp && tar -zcf sbom.tar.gz *.spdx

-- name: Sign SBOM
+- name: Generate SBOM hash
+shell: bash
+id: sbom-hash
run: |
-cosign sign-blob \
---output-certificate=/tmp/sbom.tar.gz.pem \
---output-signature=/tmp/sbom.tar.gz.sig \
--y \
-/tmp/sbom.tar.gz
-
-- name: Upload SBOM and signature assets
+# sha256sum generates sha256 hash for sbom.
+# base64 -w0 encodes to base64 and outputs on a single line.
+# sha256sum /tmp/sbom.tar.gz ... | base64 -w0
+echo "hashes=$(sha256sum /tmp/sbom.tar.gz | base64 -w0)" >> "$GITHUB_OUTPUT"
+
+- name: Upload SBOM
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
-/tmp/sbom.tar.*
+/tmp/sbom.tar.gz

+sbom-provenance:
+needs: [generate-sbom]
+permissions:
+actions: read # for detecting the Github Actions environment
+id-token: write # Needed for provenance signing and ID
+contents: write # Needed for release uploads
+if: github.repository == 'argoproj/argo-cd'
+# Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator
+uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
+with:
+base64-subjects: "${{ needs.generate-sbom.outputs.hashes }}"
+provenance-name: "argocd-sbom.intoto.jsonl"
+upload-assets: true

post-release:
needs:
- argocd-image
@@ -211,7 +222,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
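The new `Generate SBOM hash` step pipes `sha256sum /tmp/sbom.tar.gz` through `base64 -w0` and hands the result to the SLSA generator as `base64-subjects`. A small Go sketch of what that pipeline produces, using the same file path as the workflow (illustrative only, not Argo CD code):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("/tmp/sbom.tar.gz")
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(data)
	// sha256sum prints "<hex digest>  <file name>\n"; base64 -w0 then encodes
	// that whole line without wrapping, which is what ends up in base64-subjects.
	line := fmt.Sprintf("%s  /tmp/sbom.tar.gz\n", hex.EncodeToString(sum[:]))
	fmt.Println("hashes=" + base64.StdEncoding.EncodeToString([]byte(line)))
}
```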
.github/workflows/scorecard.yaml (vendored), 4 changed lines

@@ -30,7 +30,7 @@ jobs:
steps:
- name: "Checkout code"
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
persist-credentials: false

@@ -54,7 +54,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
-uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: SARIF file
path: results.sarif
.github/workflows/update-snyk.yaml (vendored), 2 changed lines

@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
-uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Build reports
.gitpod.Dockerfile (vendored), 2 changed lines

@@ -1,4 +1,4 @@
-FROM gitpod/workspace-full@sha256:d5787229cd062aceae91109f1690013d3f25062916492fb7f444d13de3186178
+FROM gitpod/workspace-full@sha256:511cecde4dc129ca9eb4cc4c479d61f95e5485ebe320a07f5b902f11899956a3

USER root

CODEOWNERS (new file), 9 lines

@@ -0,0 +1,9 @@
+# All
+** @argoproj/argocd-approvers
+
+# Docs
+/docs/** @argoproj/argocd-approvers @argoproj/argocd-approvers-docs
+
+# CI
+/.github/** @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
+/.goreleaser.yaml @argoproj/argocd-approvers @argoproj/argocd-approvers-ci
@@ -1,10 +1,10 @@
-ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:ac58ff7fe25edc58bdf0067ca99df00014dbd032e2246d30a722fa348fd799a5
+ARG BASE_IMAGE=docker.io/library/ubuntu:22.04@sha256:0bced47fffa3361afa981854fcabcd4577cd43cebbb808cea2b1f33a3dd7f508
####################################################################################################
# Builder image
# Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
-FROM docker.io/library/golang:1.20.5@sha256:4b1fc02d16fca272e5e6e6adc98396219b43ef663a377eef4a97e881d364393f AS builder
+FROM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS builder

RUN echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list

@@ -83,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
-FROM --platform=$BUILDPLATFORM docker.io/library/node:20.3.1@sha256:2f0b0c15f97441defa812268ee943bbfaaf666ea6cf7cac62ee3f127906b35c6 AS argocd-ui
+FROM --platform=$BUILDPLATFORM docker.io/library/node:20.6.1@sha256:14bd39208dbc0eb171cbfb26ccb9ac09fa1b2eba04ccd528ab5d12983fd9ee24 AS argocd-ui

WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
@@ -101,7 +101,7 @@ RUN HOST_ARCH=$TARGETARCH NODE_ENV='production' NODE_ONLINE_ENV='online' NODE_OP
####################################################################################################
# Argo CD Build stage which performs the actual build of Argo CD binaries
####################################################################################################
-FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.20.5@sha256:4b1fc02d16fca272e5e6e6adc98396219b43ef663a377eef4a97e881d364393f AS argocd-build
+FROM --platform=$BUILDPLATFORM docker.io/library/golang:1.21.3@sha256:02d7116222536a5cf0fcf631f90b507758b669648e0f20186d2dc94a9b419a9b AS argocd-build

WORKDIR /go/src/github.com/argoproj/argo-cd

Makefile, 5 changed lines

@@ -352,7 +352,7 @@ lint-local:
golangci-lint --version
# NOTE: If you get a "Killed" OOM message, try reducing the value of GOGC
# See https://github.com/golangci/golangci-lint#memory-usage-of-golangci-lint
-GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --fix --verbose --timeout 3000s
+GOGC=$(ARGOCD_LINT_GOGC) GOMAXPROCS=2 golangci-lint run --enable gofmt --fix --verbose --timeout 3000s --max-issues-per-linter 0 --max-same-issues 0

.PHONY: lint-ui
lint-ui: test-tools-image
@@ -460,6 +460,7 @@ start-e2e-local: mod-vendor-local dep-ui-local cli-local
BIN_MODE=$(ARGOCD_BIN_MODE) \
ARGOCD_APPLICATION_NAMESPACES=argocd-e2e-external \
ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACES=argocd-e2e-external \
+ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS=http://127.0.0.1:8341,http://127.0.0.1:8342,http://127.0.0.1:8343,http://127.0.0.1:8344 \
ARGOCD_E2E_TEST=true \
goreman -f $(ARGOCD_PROCFILE) start ${ARGOCD_START}

@@ -651,4 +652,4 @@ help:
@echo 'codegen:'
@echo '  codegen(-local) -- if using -local, run the following targets first'
@echo '  install-codegen-tools-local -- run this to install the codegen tools'
-@echo '  install-go-tools-local -- run this to install go libraries for codegen'
+@echo '  install-go-tools-local -- run this to install go libraries for codegen'
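The OOM note in `lint-local` works because GOGC sets the garbage collector's heap-growth target, so a lower value trades CPU for a smaller peak heap while golangci-lint runs. A tiny Go illustration of the same knob through `runtime/debug` (the value 20 is only an example, not the Makefile's default):

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Running with GOGC=20 is equivalent to calling SetGCPercent(20):
	// the collector triggers once the heap grows 20% past the live set,
	// which keeps peak memory lower at the cost of more frequent GC cycles.
	previous := debug.SetGCPercent(20)
	fmt.Printf("GC target lowered from %d%% to 20%%\n", previous)
}
```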
@@ -56,7 +56,7 @@ Participation in the Argo CD project is governed by the [CNCF Code of Conduct](h
### Blogs and Presentations

1. [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
-1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://blog.akuity.io/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argo-cd-7c5b4057ee49)
+1. [Unveil the Secret Ingredients of Continuous Delivery at Enterprise Scale with Argo CD](https://akuity.io/blog/unveil-the-secret-ingredients-of-continuous-delivery-at-enterprise-scale-with-argocd-kubecon-china-2021/)
1. [GitOps Without Pipelines With ArgoCD Image Updater](https://youtu.be/avPUQin9kzU)
1. [Combining Argo CD (GitOps), Crossplane (Control Plane), And KubeVela (OAM)](https://youtu.be/eEcgn_gU3SM)
1. [How to Apply GitOps to Everything - Combining Argo CD and Crossplane](https://youtu.be/yrj4lmScKHQ)
@@ -35,9 +35,7 @@ impact on Argo CD before opening an issue at least roughly.

## Supported Versions

-We currently support the most recent release (`N`, e.g. `1.8`) and the release
-previous to the most recent one (`N-1`, e.g. `1.7`). With the release of
-`N+1`, `N-1` drops out of support and `N` becomes `N-1`.
+We currently support the last 3 minor versions of Argo CD with security and bug fixes.

We regularly perform patch releases (e.g. `1.8.5` and `1.7.12`) for the
supported versions, which will contain fixes for security vulnerabilities and
@@ -52,7 +50,7 @@ of releasing it within a patch branch for the currently supported releases.

## Reporting a Vulnerability

-If you find a security related bug in ArgoCD, we kindly ask you for responsible
+If you find a security related bug in Argo CD, we kindly ask you for responsible
disclosure and for giving us appropriate time to react, analyze and develop a
fix to mitigate the found security vulnerability.
USERS.md, 17 added lines

@@ -7,6 +7,7 @@ Currently, the following organizations are **officially** using Argo CD:

1. [127Labs](https://127labs.com/)
1. [3Rein](https://www.3rein.com/)
1. [4data](https://4data.ch/)
1. [7shifts](https://www.7shifts.com/)
1. [Adevinta](https://www.adevinta.com/)
1. [Adfinis](https://adfinis.com)
@@ -24,7 +25,9 @@ Currently, the following organizations are **officially** using Argo CD:
1. [AppDirect](https://www.appdirect.com)
1. [Arctiq Inc.](https://www.arctiq.ca)
1. [ARZ Allgemeines Rechenzentrum GmbH](https://www.arz.at/)
2. [Autodesk](https://www.autodesk.com)
1. [Axual B.V.](https://axual.com)
1. [Back Market](https://www.backmarket.com)
1. [Baloise](https://www.baloise.com)
1. [BCDevExchange DevOps Platform](https://bcdevexchange.org/DevOpsPlatform)
1. [Beat](https://thebeat.co/en/)
@@ -41,6 +44,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Capital One](https://www.capitalone.com)
1. [CARFAX](https://www.carfax.com)
1. [CARFAX Europe](https://www.carfax.eu)
1. [Carrefour Group](https://www.carrefour.com)
1. [Casavo](https://casavo.com)
1. [Celonis](https://www.celonis.com/)
1. [CERN](https://home.cern/)
@@ -70,6 +74,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [DigitalOcean](https://www.digitalocean.com)
1. [Divistant](https://divistant.com)
1. [Dott](https://ridedott.com)
1. [Doximity](https://www.doximity.com/)
1. [EDF Renewables](https://www.edf-re.com/)
1. [edX](https://edx.org)
@@ -81,9 +86,11 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Energisme](https://energisme.com/)
1. [enigmo](https://enigmo.co.jp/)
1. [Envoy](https://envoy.com/)
1. [Factorial](https://factorialhr.com/)
1. [Farfetch](https://www.farfetch.com)
1. [Faro](https://www.faro.com/)
1. [Fave](https://myfave.com)
1. [Flexport](https://www.flexport.com/)
1. [Flip](https://flip.id)
1. [Fonoa](https://www.fonoa.com/)
1. [freee](https://corp.freee.co.jp/en/company/)
@@ -99,6 +106,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [gloat](https://gloat.com/)
1. [GLOBIS](https://globis.com)
1. [Glovo](https://www.glovoapp.com)
1. [GlueOps](https://glueops.dev)
1. [GMETRI](https://gmetri.com/)
1. [Gojek](https://www.gojek.io/)
1. [GoTo](https://www.goto.com/)
@@ -124,6 +132,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Info Support](https://www.infosupport.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Intuit](https://www.intuit.com/)
1. [Jellysmack](https://www.jellysmack.com)
1. [Joblift](https://joblift.com/)
1. [JovianX](https://www.jovianx.com/)
1. [Kaltura](https://corp.kaltura.com/)
@@ -137,8 +146,10 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Kinguin](https://www.kinguin.net/)
1. [KintoHub](https://www.kintohub.com/)
1. [KompiTech GmbH](https://www.kompitech.com/)
1. [KPMG](https://kpmg.com/uk)
1. [KubeSphere](https://github.com/kubesphere)
1. [Kurly](https://www.kurly.com/)
1. [Kvist](https://kvistsolutions.com)
1. [LexisNexis](https://www.lexisnexis.com/)
1. [Lian Chu Securities](https://lczq.com)
1. [Liatrio](https://www.liatrio.com)
@@ -190,6 +201,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [OpsVerse](https://opsverse.io)
1. [Optoro](https://www.optoro.com/)
1. [Orbital Insight](https://orbitalinsight.com/)
1. [Oscar Health Insurance](https://hioscar.com/)
1. [p3r](https://www.p3r.one/)
1. [Packlink](https://www.packlink.com/)
1. [PagerDuty](https://www.pagerduty.com/)
@@ -197,6 +209,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Patreon](https://www.patreon.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [PGS](https://www.pgs.com)
1. [Pigment](https://www.gopigment.com/)
1. [Pipefy](https://www.pipefy.com/)
1. [Pismo](https://pismo.io/)
@@ -268,17 +281,21 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Trendyol](https://www.trendyol.com/)
1. [tru.ID](https://tru.id)
1. [Trusting Social](https://trustingsocial.com/)
1. [Twilio Segment](https://segment.com/)
1. [Twilio SendGrid](https://sendgrid.com)
1. [tZERO](https://www.tzero.com/)
1. [U.S. Veterans Affairs Department](https://www.va.gov/)
1. [UBIO](https://ub.io/)
1. [UFirstGroup](https://www.ufirstgroup.com/en/)
1. [ungleich.ch](https://ungleich.ch/)
1. [Unifonic Inc](https://www.unifonic.com/)
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
1. [Upsider Inc.](https://up-sider.com/lp/)
1. [Urbantz](https://urbantz.com/)
1. [Vectra](https://www.vectra.ai)
1. [Veepee](https://www.veepee.com)
1. [Viaduct](https://www.viaduct.ai/)
1. [VietMoney](https://vietmoney.vn/)
1. [Vinted](https://vinted.com/)
1. [Virtuo](https://www.govirtuo.com/)
1. [VISITS Technologies](https://visits.world/en)
@@ -28,9 +28,11 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
k8scache "k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
@@ -58,10 +60,6 @@ const (
|
||||
// https://github.com/argoproj-labs/argocd-notifications/blob/33d345fa838829bb50fca5c08523aba380d2c12b/pkg/controller/state.go#L17
|
||||
NotifiedAnnotationKey = "notified.notifications.argoproj.io"
|
||||
ReconcileRequeueOnValidationError = time.Minute * 3
|
||||
|
||||
// LabelKeyAppSetInstance is the label key to use to uniquely identify the apps of an applicationset
|
||||
// The ArgoCD applicationset name is used as the instance name
|
||||
LabelKeyAppSetInstance = "argocd.argoproj.io/application-set-name"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -83,9 +81,13 @@ type ApplicationSetReconciler struct {
|
||||
Policy argov1alpha1.ApplicationsSyncPolicy
|
||||
EnablePolicyOverride bool
|
||||
utils.Renderer
|
||||
ArgoCDNamespace string
|
||||
ApplicationSetNamespaces []string
|
||||
EnableProgressiveSyncs bool
|
||||
ArgoCDNamespace string
|
||||
ApplicationSetNamespaces []string
|
||||
EnableProgressiveSyncs bool
|
||||
SCMRootCAPath string
|
||||
GlobalPreservedAnnotations []string
|
||||
GlobalPreservedLabels []string
|
||||
Cache cache.Cache
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -158,13 +160,15 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
|
||||
if r.EnableProgressiveSyncs {
|
||||
if applicationSetInfo.Spec.Strategy == nil && len(applicationSetInfo.Status.ApplicationStatus) > 0 {
|
||||
// If appset used progressive sync but stopped, clean up the progressive sync application statuses
|
||||
log.Infof("Removing %v unnecessary AppStatus entries from ApplicationSet %v", len(applicationSetInfo.Status.ApplicationStatus), applicationSetInfo.Name)
|
||||
|
||||
err := r.setAppSetApplicationStatus(ctx, &applicationSetInfo, []argov1alpha1.ApplicationSetApplicationStatus{})
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to clear previous AppSet application statuses for %v: %w", applicationSetInfo.Name, err)
|
||||
}
|
||||
} else {
|
||||
} else if applicationSetInfo.Spec.Strategy != nil {
|
||||
// appset uses progressive sync
|
||||
applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
|
||||
@@ -294,7 +298,6 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
}
|
||||
|
||||
requeueAfter := r.getMinRequeueAfter(&applicationSetInfo)
|
||||
logCtx.WithField("requeueAfter", requeueAfter).Info("end reconcile")
|
||||
|
||||
if len(validateErrors) == 0 {
|
||||
if err := r.setApplicationSetStatusCondition(ctx,
|
||||
@@ -308,8 +311,13 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
} else if requeueAfter == time.Duration(0) {
|
||||
// Ensure that the request is requeued if there are validation errors.
|
||||
requeueAfter = ReconcileRequeueOnValidationError
|
||||
}
|
||||
|
||||
logCtx.WithField("requeueAfter", requeueAfter).Info("end reconcile")
|
||||
|
||||
return ctrl.Result{
|
||||
RequeueAfter: requeueAfter,
|
||||
}, nil
|
||||
@@ -430,8 +438,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
|
||||
errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
proj, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
|
||||
_, err := r.ArgoAppClientset.ArgoprojV1alpha1().AppProjects(r.ArgoCDNamespace).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierr.IsNotFound(err) {
|
||||
errorsByIndex[i] = fmt.Errorf("application references project %s which does not exist", app.Spec.Project)
|
||||
@@ -445,15 +452,6 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
|
||||
continue
|
||||
}
|
||||
|
||||
conditions, err := argoutil.ValidatePermissions(ctx, &app.Spec, proj, r.ArgoDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(conditions) > 0 {
|
||||
errorsByIndex[i] = fmt.Errorf("application spec is invalid: %s", argoutil.FormatAppConditions(conditions))
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return errorsByIndex, nil
|
||||
@@ -511,10 +509,6 @@ func (r *ApplicationSetReconciler) generateApplications(applicationSetInfo argov
|
||||
|
||||
for _, a := range t {
|
||||
tmplApplication := getTempApplication(a.Template)
|
||||
if tmplApplication.Labels == nil {
|
||||
tmplApplication.Labels = make(map[string]string)
|
||||
}
|
||||
tmplApplication.Labels[LabelKeyAppSetInstance] = applicationSetInfo.Name
|
||||
|
||||
for _, p := range a.Params {
|
||||
app, err := r.Renderer.RenderTemplateParams(tmplApplication, applicationSetInfo.Spec.SyncPolicy, p, applicationSetInfo.Spec.GoTemplate, applicationSetInfo.Spec.GoTemplateOptions)
|
||||
@@ -583,6 +577,25 @@ func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProg
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) updateCache(ctx context.Context, obj client.Object, logger *log.Entry) {
|
||||
informer, err := r.Cache.GetInformer(ctx, obj)
|
||||
if err != nil {
|
||||
logger.Errorf("failed to get informer: %v", err)
|
||||
return
|
||||
}
|
||||
// The controller runtime abstract away informers creation
|
||||
// so unfortunately could not find any other way to access informer store.
|
||||
k8sInformer, ok := informer.(k8scache.SharedInformer)
|
||||
if !ok {
|
||||
logger.Error("informer is not a kubernetes informer")
|
||||
return
|
||||
}
|
||||
if err := k8sInformer.GetStore().Update(obj); err != nil {
|
||||
logger.Errorf("failed to update cache: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// createOrUpdateInCluster will create / update application resources in the cluster.
|
||||
// - For new applications, it will call create
|
||||
// - For existing application, it will call update
|
||||
@@ -596,6 +609,9 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
appLog := log.WithFields(log.Fields{"app": generatedApp.Name, "appSet": applicationSet.Name})
|
||||
generatedApp.Namespace = applicationSet.Namespace
|
||||
|
||||
// Normalize to avoid fighting with the application controller.
|
||||
generatedApp.Spec = *argoutil.NormalizeApplicationSpec(&generatedApp.Spec)
|
||||
|
||||
found := &argov1alpha1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: generatedApp.Name,
|
||||
@@ -607,7 +623,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
},
|
||||
}
|
||||
|
||||
action, err := utils.CreateOrUpdate(ctx, r.Client, found, func() error {
|
||||
action, err := utils.CreateOrUpdate(ctx, appLog, r.Client, applicationSet.Spec.IgnoreApplicationDifferences, found, func() error {
|
||||
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
|
||||
found.Spec = generatedApp.Spec
|
||||
|
||||
@@ -617,9 +633,21 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
}
|
||||
|
||||
preservedAnnotations := make([]string, 0)
|
||||
preservedLabels := make([]string, 0)
|
||||
|
||||
if applicationSet.Spec.PreservedFields != nil {
|
||||
preservedAnnotations = append(preservedAnnotations, applicationSet.Spec.PreservedFields.Annotations...)
|
||||
preservedLabels = append(preservedLabels, applicationSet.Spec.PreservedFields.Labels...)
|
||||
}
|
||||
|
||||
if len(r.GlobalPreservedAnnotations) > 0 {
|
||||
preservedAnnotations = append(preservedAnnotations, r.GlobalPreservedAnnotations...)
|
||||
}
|
||||
|
||||
if len(r.GlobalPreservedLabels) > 0 {
|
||||
preservedLabels = append(preservedLabels, r.GlobalPreservedLabels...)
|
||||
}
|
||||
|
||||
// Preserve specially treated argo cd annotations:
|
||||
// * https://github.com/argoproj/applicationset/issues/180
|
||||
// * https://github.com/argoproj/argo-cd/issues/10500
|
||||
@@ -633,10 +661,21 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
generatedApp.Annotations[key] = state
|
||||
}
|
||||
}
|
||||
|
||||
for _, key := range preservedLabels {
|
||||
if state, exists := found.ObjectMeta.Labels[key]; exists {
|
||||
if generatedApp.Labels == nil {
|
||||
generatedApp.Labels = map[string]string{}
|
||||
}
|
||||
generatedApp.Labels[key] = state
|
||||
}
|
||||
}
|
||||
|
||||
found.ObjectMeta.Annotations = generatedApp.Annotations
|
||||
|
||||
found.ObjectMeta.Finalizers = generatedApp.Finalizers
|
||||
found.ObjectMeta.Labels = generatedApp.Labels
|
||||
|
||||
return controllerutil.SetControllerReference(&applicationSet, found, r.Scheme)
|
||||
})
|
||||
|
||||
@@ -647,7 +686,7 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
r.updateCache(ctx, found, appLog)
|
||||
r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, fmt.Sprint(action), "%s Application %q", action, generatedApp.Name)
|
||||
appLog.Logf(log.InfoLevel, "%s Application", action)
|
||||
}
|
||||
@@ -688,7 +727,7 @@ func (r *ApplicationSetReconciler) getCurrentApplications(_ context.Context, app
|
||||
err := r.Client.List(context.Background(), ¤t, client.MatchingFields{".metadata.controller": applicationSet.Name})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error retrieving applications: %w", err)
|
||||
}
|
||||
|
||||
return current.Items, nil
|
||||
@@ -805,15 +844,21 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte

// If the finalizer length changed (due to filtering out an Argo finalizer), update the finalizer list on the app
if len(newFinalizers) != len(app.Finalizers) {
app.Finalizers = newFinalizers
updated := app.DeepCopy()
updated.Finalizers = newFinalizers
patch := client.MergeFrom(app)
if log.IsLevelEnabled(log.DebugLevel) {
utils.LogPatch(appLog, patch, updated)
}
if err := r.Client.Patch(ctx, updated, patch); err != nil {
return fmt.Errorf("error updating finalizers: %w", err)
}
r.updateCache(ctx, updated, appLog)
// Application must have updated list of finalizers
updated.DeepCopyInto(app)

r.Recorder.Eventf(&applicationSet, corev1.EventTypeNormal, "Updated", "Updated Application %q finalizer before deletion, because application has an invalid destination", app.Name)
appLog.Log(log.InfoLevel, "Updating application finalizer before deletion, because application has an invalid destination")

err := r.Client.Update(ctx, app, &client.UpdateOptions{})
if err != nil {
return fmt.Errorf("error updating finalizers: %w", err)
}
}
}

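The hunk above swaps a full `Update` of the Application for a `client.MergeFrom` patch when the resources-finalizer has to be removed. A minimal sketch of that controller-runtime pattern, with hypothetical names (`c`, `app`, and `newFinalizers` stand in for the reconciler's variables):

```go
package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// removeFinalizers sketches the patch-based finalizer update: only the
// finalizer change is sent to the API server, so unrelated concurrent
// writes to the Application are not clobbered.
func removeFinalizers(ctx context.Context, c client.Client, app *v1alpha1.Application, newFinalizers []string) error {
	updated := app.DeepCopy()
	updated.Finalizers = newFinalizers

	// MergeFrom computes a patch relative to the original object.
	patch := client.MergeFrom(app)
	if err := c.Patch(ctx, updated, patch); err != nil {
		return fmt.Errorf("error updating finalizers: %w", err)
	}

	// Mirror the server-side change into the caller's copy, as the hunk above does.
	updated.DeepCopyInto(app)
	return nil
}
```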
@@ -19,18 +19,21 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
k8scache "k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
crtclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
"github.com/argoproj/gitops-engine/pkg/health"
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake"
|
||||
"github.com/argoproj/argo-cd/v2/util/collections"
|
||||
@@ -39,6 +42,34 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application"
|
||||
)
|
||||
|
||||
type fakeStore struct {
|
||||
k8scache.Store
|
||||
}
|
||||
|
||||
func (f *fakeStore) Update(obj interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeInformer struct {
|
||||
k8scache.SharedInformer
|
||||
}
|
||||
|
||||
func (f *fakeInformer) AddIndexers(indexers k8scache.Indexers) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeInformer) GetStore() k8scache.Store {
|
||||
return &fakeStore{}
|
||||
}
|
||||
|
||||
type fakeCache struct {
|
||||
cache.Cache
|
||||
}
|
||||
|
||||
func (f *fakeCache) GetInformer(ctx context.Context, obj crtclient.Object) (cache.Informer, error) {
|
||||
return &fakeInformer{}, nil
|
||||
}
|
||||
|
||||
type generatorMock struct {
|
||||
mock.Mock
|
||||
}
|
||||
@@ -164,9 +195,6 @@ func TestExtractApplications(t *testing.T) {
|
||||
if cc.generateParamsError == nil {
|
||||
for _, p := range cc.params {
|
||||
|
||||
tmpApplication := getTempApplication(cc.template)
|
||||
tmpApplication.Labels[LabelKeyAppSetInstance] = appSet.Name
|
||||
|
||||
if cc.rendererError != nil {
|
||||
rendererMock.On("RenderTemplateParams", getTempApplication(cc.template), p, false, []string(nil)).
|
||||
Return(nil, cc.rendererError)
|
||||
@@ -187,6 +215,7 @@ func TestExtractApplications(t *testing.T) {
|
||||
},
|
||||
Renderer: &rendererMock,
|
||||
KubeClientset: kubefake.NewSimpleClientset(),
|
||||
Cache: &fakeCache{},
|
||||
}
|
||||
|
||||
got, reason, err := r.generateApplications(v1alpha1.ApplicationSet{
|
||||
@@ -288,21 +317,7 @@ func TestMergeTemplateApplications(t *testing.T) {
|
||||
|
||||
rendererMock := rendererMock{}
|
||||
|
||||
appSet := &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{generator},
|
||||
Template: cc.template,
|
||||
},
|
||||
}
|
||||
|
||||
tmpApplication := getTempApplication(cc.expectedMerged)
|
||||
tmpApplication.Labels[LabelKeyAppSetInstance] = appSet.Name
|
||||
|
||||
rendererMock.On("RenderTemplateParams", tmpApplication, cc.params[0], false, []string(nil)).
|
||||
rendererMock.On("RenderTemplateParams", getTempApplication(cc.expectedMerged), cc.params[0], false, []string(nil)).
|
||||
Return(&cc.expectedApps[0], nil)
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
@@ -316,7 +331,17 @@ func TestMergeTemplateApplications(t *testing.T) {
|
||||
KubeClientset: kubefake.NewSimpleClientset(),
|
||||
}
|
||||
|
||||
got, _, _ := r.generateApplications(*appSet)
|
||||
got, _, _ := r.generateApplications(v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{generator},
|
||||
Template: cc.template,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
assert.Equal(t, cc.expectedApps, got)
|
||||
})
|
||||
@@ -372,6 +397,7 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{Project: "default"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -899,6 +925,350 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
name: "Ensure that the app spec is normalized before applying",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Directory: &v1alpha1.ApplicationSourceDirectory{
|
||||
Jsonnet: v1alpha1.ApplicationSourceJsonnet{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Directory: &v1alpha1.ApplicationSourceDirectory{
|
||||
Jsonnet: v1alpha1.ApplicationSourceJsonnet{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
// Directory and jsonnet block are removed
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
|
||||
name: "Ensure that ignored targetRevision difference doesn't cause an update, even if another field changes",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{".spec.source.targetRevision"}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
TargetRevision: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
TargetRevision: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
// The targetRevision is ignored, so this should not be updated.
|
||||
TargetRevision: "foo",
|
||||
// This should be updated.
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "3",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
// This is the existing value from the cluster, which should not be updated because the field is ignored.
|
||||
TargetRevision: "bar",
|
||||
// This was missing on the cluster, so it should be added.
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
// For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
|
||||
name: "ignore parameters added to a multi-source app in the cluster",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
// This should not be updated, because reconciliation shouldn't modify the App.
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
// This existed only in the cluster, but it shouldn't be removed, because the field is ignored.
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
name: "Demonstrate limitation of MergePatch", // Maybe we can fix this in Argo CD 3.0: https://github.com/argoproj/argo-cd/issues/15975
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
Namespace: "namespace",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
IgnoreApplicationDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "new: values",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
existingApps: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "2",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "foo: bar",
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{Name: "hi", Value: "there"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
desiredApps: []v1alpha1.Application{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "new: values",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []v1alpha1.Application{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Application",
|
||||
APIVersion: "argoproj.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app1",
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "3",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "project",
|
||||
Sources: []v1alpha1.ApplicationSource{
|
||||
{
|
||||
RepoURL: "https://git.example.com/test-org/test-repo.git",
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "new: values",
|
||||
// The Parameters field got blown away because the values field changed. MergePatch
// doesn't merge list items; it replaces the whole list if an item changes (see the
// sketch after this hunk). If we eventually add a `name` field to Sources, we can
// use StrategicMergePatch.
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
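The "Demonstrate limitation of MergePatch" case above comes down to RFC 7386 merge-patch semantics: objects are merged key by key, but any list in the patch replaces the whole list in the target. A self-contained sketch of that behaviour; the JSON documents are made up for illustration and use the json-patch library directly, not the controller's code path:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Live Application: one source whose helm block carries an extra parameter
	// that was added in-cluster.
	live := []byte(`{"spec":{"sources":[{"repoURL":"https://git.example.com/org/repo.git","helm":{"values":"foo: bar","parameters":[{"name":"hi","value":"there"}]}}]}}`)

	// Desired state rendered from the template: same source, new values, no parameters.
	desired := []byte(`{"spec":{"sources":[{"repoURL":"https://git.example.com/org/repo.git","helm":{"values":"new: values"}}]}}`)

	merged, err := jsonpatch.MergePatch(live, desired)
	if err != nil {
		panic(err)
	}

	// Because "sources" is a list, the desired list replaces the live one wholesale,
	// and the in-cluster "parameters" entry is lost - exactly the limitation the
	// test case documents. A strategic merge patch could merge list items, but only
	// if the list had a patch merge key such as "name".
	fmt.Println(string(merged))
}
```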
@@ -918,6 +1288,7 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)),
|
||||
Cache: &fakeCache{},
|
||||
}
|
||||
|
||||
err = r.createOrUpdateInCluster(context.TODO(), c.appSet, c.desiredApps)
|
||||
@@ -931,7 +1302,6 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
|
||||
}, got)
|
||||
|
||||
err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, obj, *got)
|
||||
}
|
||||
})
|
||||
@@ -1031,6 +1401,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(10),
|
||||
KubeClientset: kubeclientset,
|
||||
Cache: &fakeCache{},
|
||||
}
|
||||
//settingsMgr := settings.NewSettingsManager(context.TODO(), kubeclientset, "namespace")
|
||||
//argoDB := db.NewDB("namespace", settingsMgr, r.KubeClientset)
|
||||
@@ -1192,6 +1563,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(10),
|
||||
KubeClientset: kubeclientset,
|
||||
Cache: &fakeCache{},
|
||||
}
|
||||
// settingsMgr := settings.NewSettingsManager(context.TODO(), kubeclientset, "argocd")
|
||||
// argoDB := db.NewDB("argocd", settingsMgr, r.KubeClientset)
|
||||
@@ -1230,13 +1602,15 @@ func TestCreateApplications(t *testing.T) {
|
||||
err = v1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, c := range []struct {
|
||||
testCases := []struct {
|
||||
name string
|
||||
appSet v1alpha1.ApplicationSet
|
||||
existsApps []v1alpha1.Application
|
||||
apps []v1alpha1.Application
|
||||
expected []v1alpha1.Application
|
||||
}{
|
||||
{
|
||||
name: "no existing apps",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
@@ -1262,10 +1636,14 @@ func TestCreateApplications(t *testing.T) {
|
||||
Namespace: "namespace",
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing apps",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
@@ -1323,6 +1701,7 @@ func TestCreateApplications(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing apps with different project",
|
||||
appSet: v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "name",
|
||||
@@ -1379,39 +1758,43 @@ func TestCreateApplications(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
initObjs := []crtclient.Object{&c.appSet}
|
||||
for _, a := range c.existsApps {
|
||||
err = controllerutil.SetControllerReference(&c.appSet, &a, scheme)
|
||||
assert.Nil(t, err)
|
||||
initObjs = append(initObjs, &a)
|
||||
}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)),
|
||||
}
|
||||
|
||||
err = r.createInCluster(context.TODO(), c.appSet, c.apps)
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, obj := range c.expected {
|
||||
got := &v1alpha1.Application{}
|
||||
_ = client.Get(context.Background(), crtclient.ObjectKey{
|
||||
Namespace: obj.Namespace,
|
||||
Name: obj.Name,
|
||||
}, got)
|
||||
|
||||
err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, obj, *got)
|
||||
}
|
||||
}
|
||||
|
||||
for _, c := range testCases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
initObjs := []crtclient.Object{&c.appSet}
|
||||
for _, a := range c.existsApps {
|
||||
err = controllerutil.SetControllerReference(&c.appSet, &a, scheme)
|
||||
assert.Nil(t, err)
|
||||
initObjs = append(initObjs, &a)
|
||||
}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(len(initObjs) + len(c.expected)),
|
||||
Cache: &fakeCache{},
|
||||
}
|
||||
|
||||
err = r.createInCluster(context.TODO(), c.appSet, c.apps)
|
||||
assert.Nil(t, err)
|
||||
|
||||
for _, obj := range c.expected {
|
||||
got := &v1alpha1.Application{}
|
||||
_ = client.Get(context.Background(), crtclient.ObjectKey{
|
||||
Namespace: obj.Namespace,
|
||||
Name: obj.Name,
|
||||
}, got)
|
||||
|
||||
err = controllerutil.SetControllerReference(&c.appSet, &obj, r.Scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, obj, *got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteInCluster(t *testing.T) {
|
||||
@@ -1600,6 +1983,7 @@ func TestGetMinRequeueAfter(t *testing.T) {
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(0),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": &generatorMock10,
|
||||
"Git": &generatorMock1,
|
||||
@@ -1813,6 +2197,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{},
|
||||
ArgoDB: &argoDBMock,
|
||||
ArgoCDNamespace: "namespace",
|
||||
@@ -1855,7 +2240,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err := v1alpha1.AddToScheme(scheme)
|
||||
@@ -1863,9 +2248,8 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
err = v1alpha1.AddToScheme(scheme)
|
||||
assert.Nil(t, err)
|
||||
|
||||
defaultProject := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "argocd"},
|
||||
Spec: v1alpha1.AppProjectSpec{SourceRepos: []string{"*"}, Destinations: []v1alpha1.ApplicationDestination{{Namespace: "*", Server: "https://good-cluster"}}},
|
||||
project := v1alpha1.AppProject{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "good-project", Namespace: "argocd"},
|
||||
}
|
||||
appSet := v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -1878,22 +2262,22 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
{
|
||||
List: &v1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{{
|
||||
Raw: []byte(`{"cluster": "good-cluster","url": "https://good-cluster"}`),
|
||||
Raw: []byte(`{"project": "good-project"}`),
|
||||
}, {
|
||||
Raw: []byte(`{"cluster": "bad-cluster","url": "https://bad-cluster"}`),
|
||||
Raw: []byte(`{"project": "bad-project"}`),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
Template: v1alpha1.ApplicationSetTemplate{
|
||||
ApplicationSetTemplateMeta: v1alpha1.ApplicationSetTemplateMeta{
|
||||
Name: "{{.cluster}}",
|
||||
Name: "{{.project}}",
|
||||
Namespace: "argocd",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{RepoURL: "https://github.com/argoproj/argocd-example-apps", Path: "guestbook"},
|
||||
Project: "default",
|
||||
Destination: v1alpha1.ApplicationDestination{Server: "{{.url}}"},
|
||||
Project: "{{.project}}",
|
||||
Destination: v1alpha1.ApplicationDestination{Server: "https://kubernetes.default.svc"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1901,22 +2285,15 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
|
||||
kubeclientset := kubefake.NewSimpleClientset()
|
||||
argoDBMock := dbmocks.ArgoDB{}
|
||||
argoObjs := []runtime.Object{&defaultProject}
|
||||
argoObjs := []runtime.Object{&project}
|
||||
|
||||
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
|
||||
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
|
||||
badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}
|
||||
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
|
||||
argoDBMock.On("GetCluster", mock.Anything, "https://bad-cluster").Return(&badCluster, nil)
|
||||
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
|
||||
goodCluster,
|
||||
}}, nil)
|
||||
|
||||
r := ApplicationSetReconciler{
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
},
|
||||
@@ -1937,17 +2314,17 @@ func TestReconcilerValidationErrorBehaviour(t *testing.T) {
|
||||
// Verify that on validation error, no error is returned, but the object is requeued
|
||||
res, err := r.Reconcile(context.Background(), req)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, res.RequeueAfter == 0)
|
||||
assert.True(t, res.RequeueAfter == ReconcileRequeueOnValidationError)
|
||||
|
||||
var app v1alpha1.Application
|
||||
|
||||
// make sure good app got created
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-cluster"}, &app)
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "good-project"}, &app)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, app.Name, "good-cluster")
|
||||
assert.Equal(t, app.Name, "good-project")
|
||||
|
||||
// make sure bad app was not created
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "bad-cluster"}, &app)
|
||||
err = r.Client.Get(context.TODO(), crtclient.ObjectKey{Namespace: "argocd", Name: "bad-project"}, &app)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
@@ -1993,6 +2370,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
},
|
||||
@@ -2067,6 +2445,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(recordBuffer),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
},
|
||||
@@ -2103,7 +2482,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
assert.Nil(t, err)
|
||||
|
||||
retrievedApplicationSet.Spec.Template.Annotations = map[string]string{"annotation-key": "annotation-value"}
|
||||
retrievedApplicationSet.Spec.Template.Labels = map[string]string{"argocd.argoproj.io/application-set-name": "name", "label-key": "label-value"}
|
||||
retrievedApplicationSet.Spec.Template.Labels = map[string]string{"label-key": "label-value"}
|
||||
|
||||
retrievedApplicationSet.Spec.Template.Spec.Source.Helm = &v1alpha1.ApplicationSourceHelm{
|
||||
Values: "global.test: test",
|
||||
@@ -2131,7 +2510,6 @@ func TestUpdateNotPerformedWithSyncPolicyCreateOnly(t *testing.T) {
|
||||
|
||||
assert.Nil(t, app.Spec.Source.Helm)
|
||||
assert.Nil(t, app.ObjectMeta.Annotations)
|
||||
assert.Equal(t, map[string]string{"argocd.argoproj.io/application-set-name": "name"}, app.ObjectMeta.Labels)
|
||||
}
|
||||
|
||||
func TestUpdateNotPerformedWithSyncPolicyCreateDelete(t *testing.T) {
|
||||
@@ -2142,7 +2520,6 @@ func TestUpdateNotPerformedWithSyncPolicyCreateDelete(t *testing.T) {
|
||||
|
||||
assert.Nil(t, app.Spec.Source.Helm)
|
||||
assert.Nil(t, app.ObjectMeta.Annotations)
|
||||
assert.Equal(t, map[string]string{"argocd.argoproj.io/application-set-name": "name"}, app.ObjectMeta.Labels)
|
||||
}
|
||||
|
||||
func TestUpdatePerformedWithSyncPolicyCreateUpdate(t *testing.T) {
|
||||
@@ -2153,7 +2530,7 @@ func TestUpdatePerformedWithSyncPolicyCreateUpdate(t *testing.T) {
|
||||
|
||||
assert.Equal(t, "global.test: test", app.Spec.Source.Helm.Values)
|
||||
assert.Equal(t, map[string]string{"annotation-key": "annotation-value"}, app.ObjectMeta.Annotations)
|
||||
assert.Equal(t, map[string]string{"argocd.argoproj.io/application-set-name": "name", "label-key": "label-value"}, app.ObjectMeta.Labels)
|
||||
assert.Equal(t, map[string]string{"label-key": "label-value"}, app.ObjectMeta.Labels)
|
||||
}
|
||||
|
||||
func TestUpdatePerformedWithSyncPolicySync(t *testing.T) {
|
||||
@@ -2164,7 +2541,7 @@ func TestUpdatePerformedWithSyncPolicySync(t *testing.T) {
|
||||
|
||||
assert.Equal(t, "global.test: test", app.Spec.Source.Helm.Values)
|
||||
assert.Equal(t, map[string]string{"annotation-key": "annotation-value"}, app.ObjectMeta.Annotations)
|
||||
assert.Equal(t, map[string]string{"argocd.argoproj.io/application-set-name": "name", "label-key": "label-value"}, app.ObjectMeta.Labels)
|
||||
assert.Equal(t, map[string]string{"label-key": "label-value"}, app.ObjectMeta.Labels)
|
||||
}
|
||||
|
||||
func TestUpdatePerformedWithSyncPolicyCreateOnlyAndAllowPolicyOverrideFalse(t *testing.T) {
|
||||
@@ -2175,7 +2552,7 @@ func TestUpdatePerformedWithSyncPolicyCreateOnlyAndAllowPolicyOverrideFalse(t *t
|
||||
|
||||
assert.Equal(t, "global.test: test", app.Spec.Source.Helm.Values)
|
||||
assert.Equal(t, map[string]string{"annotation-key": "annotation-value"}, app.ObjectMeta.Annotations)
|
||||
assert.Equal(t, map[string]string{"argocd.argoproj.io/application-set-name": "name", "label-key": "label-value"}, app.ObjectMeta.Labels)
|
||||
assert.Equal(t, map[string]string{"label-key": "label-value"}, app.ObjectMeta.Labels)
|
||||
}
|
||||
|
||||
func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alpha1.ApplicationsSyncPolicy, recordBuffer int, allowPolicyOverride bool) v1alpha1.ApplicationList {
|
||||
@@ -2238,6 +2615,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(recordBuffer),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
},
|
||||
@@ -2383,8 +2761,7 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "AppSet-branch1-1",
|
||||
Labels: map[string]string{
|
||||
"app1": "label1",
|
||||
LabelKeyAppSetInstance: "",
|
||||
"app1": "label1",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
@@ -2419,6 +2796,7 @@ func TestGenerateAppsUsingPullRequestGenerator(t *testing.T) {
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"PullRequest": &generatorMock,
|
||||
},
|
||||
@@ -2543,6 +2921,7 @@ func TestPolicies(t *testing.T) {
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(10),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
},
|
||||
@@ -2705,6 +3084,7 @@ func TestSetApplicationSetApplicationStatus(t *testing.T) {
|
||||
Scheme: scheme,
|
||||
Renderer: &utils.Render{},
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
},
|
||||
@@ -3469,6 +3849,7 @@ func TestBuildAppDependencyList(t *testing.T) {
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{},
|
||||
ArgoDB: &argoDBMock,
|
||||
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
|
||||
@@ -4062,6 +4443,7 @@ func TestBuildAppSyncMap(t *testing.T) {
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{},
|
||||
ArgoDB: &argoDBMock,
|
||||
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
|
||||
@@ -4721,6 +5103,7 @@ func TestUpdateApplicationSetApplicationStatus(t *testing.T) {
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{},
|
||||
ArgoDB: &argoDBMock,
|
||||
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
|
||||
@@ -5474,6 +5857,7 @@ func TestUpdateApplicationSetApplicationStatusProgress(t *testing.T) {
|
||||
Client: client,
|
||||
Scheme: scheme,
|
||||
Recorder: record.NewFakeRecorder(1),
|
||||
Cache: &fakeCache{},
|
||||
Generators: map[string]generators.Generator{},
|
||||
ArgoDB: &argoDBMock,
|
||||
ArgoAppClientset: appclientset.NewSimpleClientset(argoObjs...),
|
||||
|
||||
@@ -5,9 +5,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
@@ -17,6 +14,10 @@ import (
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
func TestRequeueAfter(t *testing.T) {
|
||||
@@ -59,9 +60,9 @@ func TestRequeueAfter(t *testing.T) {
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
|
||||
"Git": generators.NewGitGenerator(mockServer),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}, "", []string{""}),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
|
||||
"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}),
|
||||
"PullRequest": generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}, "", []string{""}),
|
||||
}
|
||||
|
||||
nestedGenerators := map[string]generators.Generator{
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: guestbook
|
||||
spec:
|
||||
generators:
|
||||
- scmProvider:
|
||||
gitlab:
|
||||
api: https://gitlab.com
|
||||
group: test-argocd-proton
|
||||
includeSubgroups: true
|
||||
cloneProtocol: https
|
||||
filters:
|
||||
- repositoryMatch: test-app
|
||||
template:
|
||||
metadata:
|
||||
name: '{{ repository }}-guestbook'
|
||||
spec:
|
||||
project: "default"
|
||||
source:
|
||||
repoURL: '{{ url }}'
|
||||
targetRevision: '{{ branch }}'
|
||||
path: guestbook
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: guestbook
|
||||
@@ -61,8 +61,7 @@ func (g *ClusterGenerator) GetTemplate(appSetGenerator *argoappsetv1alpha1.Appli
|
||||
return &appSetGenerator.Clusters.Template
|
||||
}
|
||||
|
||||
func (g *ClusterGenerator) GenerateParams(
|
||||
appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, appSet *argoappsetv1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
|
||||
func (g *ClusterGenerator) GenerateParams(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator, appSet *argoappsetv1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
|
||||
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
@@ -79,7 +78,7 @@ func (g *ClusterGenerator) GenerateParams(
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
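Many hunks in this change replace a bare `return nil, err` with `fmt.Errorf("...: %w", err)`. A small sketch of why the `%w` verb is used; the sentinel error and function names here are hypothetical:

```go
package main

import (
	"errors"
	"fmt"
)

// errClusterNotFound is a hypothetical sentinel error from a lower layer.
var errClusterNotFound = errors.New("cluster not found")

func listClusters() error {
	return errClusterNotFound
}

func generateParams() error {
	if err := listClusters(); err != nil {
		// %w adds context while keeping the original error in the chain,
		// matching the wrapping style introduced above.
		return fmt.Errorf("error listing clusters: %w", err)
	}
	return nil
}

func main() {
	err := generateParams()
	fmt.Println(err)                                // error listing clusters: cluster not found
	fmt.Println(errors.Is(err, errClusterNotFound)) // true: %w preserves the chain
}
```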
@@ -74,7 +74,7 @@ func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.A
|
||||
// ListCluster from Argo CD's util/db package will include the local cluster in the list of clusters
|
||||
clustersFromArgoCD, err := utils.ListClusters(g.ctx, g.clientset, g.namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error listing clusters: %w", err)
|
||||
}
|
||||
|
||||
if clustersFromArgoCD == nil {
|
||||
@@ -85,7 +85,7 @@ func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.A
|
||||
cm, err := g.clientset.CoreV1().ConfigMaps(g.namespace).Get(g.ctx, appSetGenerator.ClusterDecisionResource.ConfigMapRef, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error reading configMapRef: %w", err)
|
||||
}
|
||||
|
||||
// Extract GVK data for the dynamic client to use
|
||||
|
||||
@@ -3,6 +3,7 @@ package generators
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
@@ -15,8 +16,6 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
const resourceApiVersion = "mallard.io/v1"
|
||||
|
||||
@@ -4,9 +4,10 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
"github.com/jeremywohl/flatten"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
@@ -124,7 +125,7 @@ func GetRelevantGenerators(requestedGenerator *argoprojiov1alpha1.ApplicationSet
|
||||
func flattenParameters(in map[string]interface{}) (map[string]string, error) {
|
||||
flat, err := flatten.Flatten(in, "", flatten.DotStyle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error flatenning parameters: %w", err)
|
||||
}
|
||||
|
||||
out := make(map[string]string, len(flat))
|
||||
@@ -152,7 +153,7 @@ func InterpolateGenerator(requestedGenerator *argoprojiov1alpha1.ApplicationSetG
|
||||
interpolatedGenerator, err := render.RenderGeneratorParams(requestedGenerator, params, useGoTemplate, goTemplateOptions)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("interpolatedGenerator", interpolatedGenerator).Error("error interpolating generator with other generator's parameter")
|
||||
return *interpolatedGenerator, err
|
||||
return argoprojiov1alpha1.ApplicationSetGenerator{}, err
|
||||
}
|
||||
|
||||
return *interpolatedGenerator, nil
|
||||
|
||||
@@ -4,13 +4,14 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/mocks"
|
||||
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
@@ -500,3 +501,60 @@ func TestInterpolateGenerator_go(t *testing.T) {
|
||||
assert.Equal(t, "production_01/west", interpolatedGenerator.Git.Files[0].Path)
|
||||
assert.Equal(t, "https://production-01.example.com", interpolatedGenerator.Git.Files[1].Path)
|
||||
}
|
||||
|
||||
func TestInterpolateGeneratorError(t *testing.T) {
|
||||
type args struct {
|
||||
requestedGenerator *argov1alpha1.ApplicationSetGenerator
|
||||
params map[string]interface{}
|
||||
useGoTemplate bool
|
||||
goTemplateOptions []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want argov1alpha1.ApplicationSetGenerator
|
||||
expectedErrStr string
|
||||
}{
|
||||
{name: "Empty Gen", args: args{
|
||||
requestedGenerator: nil,
|
||||
params: nil,
|
||||
useGoTemplate: false,
|
||||
goTemplateOptions: nil,
|
||||
}, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: "generator is empty"},
|
||||
{name: "No Params", args: args{
|
||||
requestedGenerator: &argov1alpha1.ApplicationSetGenerator{},
|
||||
params: map[string]interface{}{},
|
||||
useGoTemplate: false,
|
||||
goTemplateOptions: nil,
|
||||
}, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: ""},
|
||||
{name: "Error templating", args: args{
|
||||
requestedGenerator: &argov1alpha1.ApplicationSetGenerator{Git: &argov1alpha1.GitGenerator{
|
||||
RepoURL: "foo",
|
||||
Files: []argov1alpha1.GitFileGeneratorItem{{Path: "bar/"}},
|
||||
Revision: "main",
|
||||
Values: map[string]string{
|
||||
"git_test": "{{ toPrettyJson . }}",
|
||||
"selection": "{{ default .override .test }}",
|
||||
"resolved": "{{ index .rmap (default .override .test) }}",
|
||||
},
|
||||
}},
|
||||
params: map[string]interface{}{
|
||||
"name": "in-cluster",
|
||||
"override": "foo",
|
||||
},
|
||||
useGoTemplate: true,
|
||||
goTemplateOptions: []string{},
|
||||
}, want: argov1alpha1.ApplicationSetGenerator{}, expectedErrStr: "failed to replace parameters in generator: failed to execute go template {{ index .rmap (default .override .test) }}: template: :1:3: executing \"\" at <index .rmap (default .override .test)>: error calling index: index of untyped nil"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := InterpolateGenerator(tt.args.requestedGenerator, tt.args.params, tt.args.useGoTemplate, tt.args.goTemplateOptions)
|
||||
if tt.expectedErrStr != "" {
|
||||
assert.EqualError(t, err, tt.expectedErrStr)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.Equalf(t, tt.want, got, "InterpolateGenerator(%v, %v, %v, %v)", tt.args.requestedGenerator, tt.args.params, tt.args.useGoTemplate, tt.args.goTemplateOptions)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error generating params from git: %w", err)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
@@ -77,7 +77,7 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
|
||||
// Directories, not files
|
||||
allPaths, err := g.repos.GetDirectories(context.TODO(), appSetGenerator.Git.RepoURL, appSetGenerator.Git.Revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error getting directories from repo: %w", err)
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
@@ -92,7 +92,7 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
|
||||
|
||||
res, err := g.generateParamsFromApps(requestedApps, appSetGenerator, useGoTemplate, goTemplateOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate params from apps: %w", err)
|
||||
return nil, fmt.Errorf("error generating params from apps: %w", err)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
@@ -177,7 +177,7 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
} else {
|
||||
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error flattening object: %w", err)
|
||||
}
|
||||
for k, v := range flat {
|
||||
params[k] = fmt.Sprintf("%v", v)
|
||||
|
||||
@@ -251,7 +251,7 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
repoApps: []string{},
|
||||
repoError: fmt.Errorf("error"),
|
||||
expected: []map[string]interface{}{},
|
||||
expectedError: fmt.Errorf("error"),
|
||||
expectedError: fmt.Errorf("error generating params from git: error getting directories from repo: error"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -547,7 +547,7 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
repoApps: []string{},
|
||||
repoError: fmt.Errorf("error"),
|
||||
expected: []map[string]interface{}{},
|
||||
expectedError: fmt.Errorf("error"),
|
||||
expectedError: fmt.Errorf("error generating params from git: error getting directories from repo: error"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -742,7 +742,7 @@ func TestGitGenerateParamsFromFiles(t *testing.T) {
|
||||
repoFileContents: map[string][]byte{},
|
||||
repoPathsError: fmt.Errorf("paths error"),
|
||||
expected: []map[string]interface{}{},
|
||||
expectedError: fmt.Errorf("paths error"),
|
||||
expectedError: fmt.Errorf("error generating params from git: paths error"),
|
||||
},
|
||||
{
|
||||
name: "test invalid JSON file returns error",
|
||||
@@ -752,7 +752,7 @@ func TestGitGenerateParamsFromFiles(t *testing.T) {
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]interface{}{},
|
||||
expectedError: fmt.Errorf("unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
expectedError: fmt.Errorf("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
},
|
||||
{
|
||||
name: "test JSON array",
|
||||
@@ -1048,7 +1048,7 @@ func TestGitGenerateParamsFromFilesGoTemplate(t *testing.T) {
|
||||
repoFileContents: map[string][]byte{},
|
||||
repoPathsError: fmt.Errorf("paths error"),
|
||||
expected: []map[string]interface{}{},
|
||||
expectedError: fmt.Errorf("paths error"),
|
||||
expectedError: fmt.Errorf("error generating params from git: paths error"),
|
||||
},
|
||||
{
|
||||
name: "test invalid JSON file returns error",
|
||||
@@ -1058,7 +1058,7 @@ func TestGitGenerateParamsFromFilesGoTemplate(t *testing.T) {
|
||||
},
|
||||
repoPathsError: nil,
|
||||
expected: []map[string]interface{}{},
|
||||
expectedError: fmt.Errorf("unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
expectedError: fmt.Errorf("error generating params from git: unable to process file 'cluster-config/production/config.json': unable to parse file: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}"),
|
||||
},
|
||||
{
|
||||
name: "test JSON array",
|
||||
|
||||
@@ -5,8 +5,9 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
var _ Generator = (*ListGenerator)(nil)
|
||||
@@ -82,7 +83,7 @@ func (g *ListGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Appli
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshling decoded ElementsYaml %v", err)
|
||||
}
|
||||
res = append(res, yamlElements...)
|
||||
res = append(res, yamlElements...)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
@@ -50,7 +50,7 @@ func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.App
|
||||
|
||||
g0, err := m.getParams(appSetGenerator.Matrix.Generators[0], appSet, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error failed to get params for first generator in matrix generator: %w", err)
|
||||
}
|
||||
for _, a := range g0 {
|
||||
g1, err := m.getParams(appSetGenerator.Matrix.Generators[1], appSet, a)
|
||||
@@ -61,11 +61,11 @@ func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.App
|
||||
|
||||
if appSet.Spec.GoTemplate {
|
||||
tmp := map[string]interface{}{}
|
||||
if err := mergo.Merge(&tmp, a); err != nil {
|
||||
return nil, fmt.Errorf("failed to merge params from the first generator in the matrix generator with temp map: %w", err)
|
||||
if err := mergo.Merge(&tmp, b, mergo.WithOverride); err != nil {
|
||||
return nil, fmt.Errorf("failed to merge params from the second generator in the matrix generator with temp map: %w", err)
|
||||
}
|
||||
if err := mergo.Merge(&tmp, b); err != nil {
|
||||
return nil, fmt.Errorf("failed to merge params from the first generator in the matrix generator with the second: %w", err)
|
||||
if err := mergo.Merge(&tmp, a, mergo.WithOverride); err != nil {
|
||||
return nil, fmt.Errorf("failed to merge params from the second generator in the matrix generator with the first: %w", err)
|
||||
}
|
||||
res = append(res, tmp)
|
||||
} else {
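The reordered `mergo.Merge` calls above merge the second generator's parameters first and then overlay the first generator's with `mergo.WithOverride`, so on conflicting keys the first child generator wins (the new test case above additionally covers zero values such as `false`). A standalone sketch of that precedence with made-up parameter maps:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

func main() {
	// Parameter sets produced by the two child generators of a matrix generator.
	first := map[string]interface{}{"env": "prod", "region": "us-east-1"}
	second := map[string]interface{}{"env": "staging", "cluster": "c1"}

	// Merge the second set first, then overlay the first set with WithOverride
	// so its values take precedence on conflicts.
	tmp := map[string]interface{}{}
	if err := mergo.Merge(&tmp, second, mergo.WithOverride); err != nil {
		panic(err)
	}
	if err := mergo.Merge(&tmp, first, mergo.WithOverride); err != nil {
		panic(err)
	}

	// env comes from the first generator; region and cluster are both kept.
	fmt.Println(tmp["env"], tmp["region"], tmp["cluster"]) // prod us-east-1 c1
}
```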
@@ -94,7 +94,7 @@ func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Appli
|
||||
}
|
||||
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error retrieving merge generator: %w", err)
|
||||
}
|
||||
if mergeGen != nil && !appSet.Spec.ApplyNestedSelectors {
|
||||
foundSelector := dropDisabledNestedSelectors(mergeGen.Generators)
|
||||
@@ -146,13 +146,15 @@ func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Ap
|
||||
matrixGen, _ := getMatrixGenerator(r)
|
||||
mergeGen, _ := getMergeGenerator(r)
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
Plugin: r.Plugin,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
Plugin: r.Plugin,
|
||||
SCMProvider: r.SCMProvider,
|
||||
ClusterDecisionResource: r.ClusterDecisionResource,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
|
||||
@@ -271,6 +271,28 @@ func TestMatrixGenerateGoTemplate(t *testing.T) {
|
||||
{"a": "2", "b": "2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "parameter override: first list elements take precedence",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"booleanFalse": false, "booleanTrue": true, "stringFalse": "false", "stringTrue": "true"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{Raw: []byte(`{"booleanFalse": true, "booleanTrue": false, "stringFalse": "true", "stringTrue": "false"}`)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{"booleanFalse": false, "booleanTrue": true, "stringFalse": "false", "stringTrue": "true"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns error if there is less than two base generators",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
@@ -404,6 +426,10 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
|
||||
pullRequestGenerator := &argoprojiov1alpha1.PullRequestGenerator{}
|
||||
|
||||
scmGenerator := &argoprojiov1alpha1.SCMProviderGenerator{}
|
||||
|
||||
duckTypeGenerator := &argoprojiov1alpha1.DuckTypeGenerator{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
@@ -461,6 +487,30 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
},
|
||||
expected: time.Duration(30 * time.Minute),
|
||||
},
|
||||
{
|
||||
name: "returns the default time for duck type generator",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
ClusterDecisionResource: duckTypeGenerator,
|
||||
},
|
||||
},
|
||||
expected: time.Duration(3 * time.Minute),
|
||||
},
|
||||
{
|
||||
name: "returns the default time for scm generator",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
SCMProvider: scmGenerator,
|
||||
},
|
||||
},
|
||||
expected: time.Duration(30 * time.Minute),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -471,18 +521,22 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
PullRequest: g.PullRequest,
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
PullRequest: g.PullRequest,
|
||||
SCMProvider: g.SCMProvider,
|
||||
ClusterDecisionResource: g.ClusterDecisionResource,
|
||||
}
|
||||
mock.On("GetRequeueAfter", &gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter, nil)
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"PullRequest": &PullRequestGenerator{},
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"PullRequest": &PullRequestGenerator{},
|
||||
"SCMProvider": &SCMProviderGenerator{},
|
||||
"ClusterDecisionResource": &DuckTypeGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -38,10 +38,10 @@ func NewMergeGenerator(supportedGenerators map[string]Generator) Generator {
|
||||
// in slices ordered according to the order of the given generators.
|
||||
func (m *MergeGenerator) getParamSetsForAllGenerators(generators []argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([][]map[string]interface{}, error) {
|
||||
var paramSets [][]map[string]interface{}
|
||||
for _, generator := range generators {
|
||||
for i, generator := range generators {
|
||||
generatorParamSets, err := m.getParams(generator, appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error getting params from generator %d of %d: %w", i+1, len(generators), err)
|
||||
}
|
||||
// concatenate param lists produced by each generator
|
||||
paramSets = append(paramSets, generatorParamSets)
|
||||
@@ -61,18 +61,18 @@ func (m *MergeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Appl
|
||||
|
||||
paramSetsFromGenerators, err := m.getParamSetsForAllGenerators(appSetGenerator.Merge.Generators, appSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error getting param sets from generators: %w", err)
|
||||
}
|
||||
|
||||
baseParamSetsByMergeKey, err := getParamSetsByMergeKey(appSetGenerator.Merge.MergeKeys, paramSetsFromGenerators[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error getting param sets by merge key: %w", err)
|
||||
}
|
||||
|
||||
for _, paramSets := range paramSetsFromGenerators[1:] {
|
||||
paramSetsByMergeKey, err := getParamSetsByMergeKey(appSetGenerator.Merge.MergeKeys, paramSets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error getting param sets by merge key: %w", err)
|
||||
}
|
||||
|
||||
for mergeKeyValue, baseParamSet := range baseParamSetsByMergeKey {
|
||||
@@ -80,13 +80,13 @@ func (m *MergeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Appl
|
||||
|
||||
if appSet.Spec.GoTemplate {
|
||||
if err := mergo.Merge(&baseParamSet, overrideParamSet, mergo.WithOverride); err != nil {
|
||||
return nil, fmt.Errorf("failed to merge base param set with override param set: %w", err)
|
||||
return nil, fmt.Errorf("error merging base param set with override param set: %w", err)
|
||||
}
|
||||
baseParamSetsByMergeKey[mergeKeyValue] = baseParamSet
|
||||
} else {
|
||||
overriddenParamSet, err := utils.CombineStringMapsAllowDuplicates(baseParamSet, overrideParamSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error combining string maps: %w", err)
|
||||
}
|
||||
baseParamSetsByMergeKey[mergeKeyValue] = utils.ConvertToMapStringInterface(overriddenParamSet)
|
||||
}
|
||||
@@ -125,7 +125,7 @@ func getParamSetsByMergeKey(mergeKeys []string, paramSets []map[string]interface
}
paramSetKeyJson, err := json.Marshal(paramSetKey)
if err != nil {
return nil, err
return nil, fmt.Errorf("error marshalling param set key json: %w", err)
}
paramSetKeyString := string(paramSetKeyJson)
if _, exists := paramSetsByMergeKey[paramSetKeyString]; exists {
@@ -201,13 +201,15 @@ func (m *MergeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.App
matrixGen, _ := getMatrixGenerator(r)
mergeGen, _ := getMergeGenerator(r)
base := &argoprojiov1alpha1.ApplicationSetGenerator{
List: r.List,
Clusters: r.Clusters,
Git: r.Git,
PullRequest: r.PullRequest,
Plugin: r.Plugin,
Matrix: matrixGen,
Merge: mergeGen,
List: r.List,
Clusters: r.Clusters,
Git: r.Git,
PullRequest: r.PullRequest,
Plugin: r.Plugin,
SCMProvider: r.SCMProvider,
ClusterDecisionResource: r.ClusterDecisionResource,
Matrix: matrixGen,
Merge: mergeGen,
}
generators := GetRelevantGenerators(base, m.supportedGenerators)

@@ -234,7 +236,7 @@ func getMergeGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*arg
}
merge, err := argoprojiov1alpha1.ToNestedMergeGenerator(r.Merge)
if err != nil {
return nil, err
return nil, fmt.Errorf("error converting to nested merge generator: %w", err)
}
return merge.ToMergeGenerator(), nil
}

@@ -71,7 +71,7 @@ func (g *PluginGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.App

pluginClient, err := g.getPluginFromGenerator(ctx, applicationSetInfo.Name, providerConfig)
if err != nil {
return nil, err
return nil, fmt.Errorf("error getting plugin from generator: %w", err)
}

list, err := pluginClient.List(ctx, providerConfig.Input.Parameters)
@@ -81,7 +81,7 @@ func (g *PluginGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.App

res, err := g.generateParams(appSetGenerator, applicationSetInfo, list.Output.Parameters, appSetGenerator.Plugin.Input.Parameters, applicationSetInfo.Spec.GoTemplate)
if err != nil {
return nil, err
return nil, fmt.Errorf("error generating params: %w", err)
}

return res, nil
@@ -108,7 +108,7 @@ func (g *PluginGenerator) getPluginFromGenerator(ctx context.Context, appSetName

pluginClient, err := plugin.NewPluginService(ctx, appSetName, cm["baseUrl"], token, requestTimeout)
if err != nil {
return nil, err
return nil, fmt.Errorf("error initializing plugin client: %w", err)
}
return pluginClient, nil
}

@@ -475,7 +475,7 @@ func TestPluginGenerateParams(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("error fetching Secret token: error fetching secret default/argocd-secret: secrets \"argocd-secret\" not found"),
|
||||
expectedError: fmt.Errorf("error getting plugin from generator: error fetching Secret token: error fetching secret default/argocd-secret: secrets \"argocd-secret\" not found"),
|
||||
},
|
||||
{
|
||||
name: "no configmap",
|
||||
@@ -522,7 +522,7 @@ func TestPluginGenerateParams(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("error fetching ConfigMap: configmaps \"\" not found"),
|
||||
expectedError: fmt.Errorf("error getting plugin from generator: error fetching ConfigMap: configmaps \"\" not found"),
|
||||
},
|
||||
{
|
||||
name: "no baseUrl",
|
||||
@@ -577,7 +577,7 @@ func TestPluginGenerateParams(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("error fetching ConfigMap: baseUrl not found in ConfigMap"),
|
||||
expectedError: fmt.Errorf("error getting plugin from generator: error fetching ConfigMap: baseUrl not found in ConfigMap"),
|
||||
},
|
||||
{
|
||||
name: "no token",
|
||||
@@ -624,7 +624,7 @@ func TestPluginGenerateParams(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: fmt.Errorf("error fetching ConfigMap: token not found in ConfigMap"),
|
||||
expectedError: fmt.Errorf("error getting plugin from generator: error fetching ConfigMap: token not found in ConfigMap"),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -25,12 +25,16 @@ type PullRequestGenerator struct {
client client.Client
selectServiceProviderFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
auth SCMAuthProviders
scmRootCAPath string
allowedSCMProviders []string
}

func NewPullRequestGenerator(client client.Client, auth SCMAuthProviders) Generator {
func NewPullRequestGenerator(client client.Client, auth SCMAuthProviders, scmRootCAPath string, allowedScmProviders []string) Generator {
g := &PullRequestGenerator{
client: client,
auth: auth,
client: client,
auth: auth,
scmRootCAPath: scmRootCAPath,
allowedSCMProviders: allowedScmProviders,
}
g.selectServiceProviderFunc = g.selectServiceProvider
return g
@@ -118,18 +122,27 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
// selectServiceProvider selects the provider to get pull requests from the configuration
func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, generatorConfig *argoprojiov1alpha1.PullRequestGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
if generatorConfig.Github != nil {
if !ScmProviderAllowed(applicationSetInfo, generatorConfig.Github.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", generatorConfig.Github.API)
}
return g.github(ctx, generatorConfig.Github, applicationSetInfo)
}
if generatorConfig.GitLab != nil {
providerConfig := generatorConfig.GitLab
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
}
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %v", err)
}
return pullrequest.NewGitLabService(ctx, token, providerConfig.API, providerConfig.Project, providerConfig.Labels, providerConfig.PullRequestState)
return pullrequest.NewGitLabService(ctx, token, providerConfig.API, providerConfig.Project, providerConfig.Labels, providerConfig.PullRequestState, g.scmRootCAPath, providerConfig.Insecure)
}
if generatorConfig.Gitea != nil {
providerConfig := generatorConfig.Gitea
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", generatorConfig.Gitea.API)
}
token, err := g.getSecretRef(ctx, providerConfig.TokenRef, applicationSetInfo.Namespace)
if err != nil {
return nil, fmt.Errorf("error fetching Secret token: %v", err)
@@ -138,6 +151,9 @@ func (g *PullRequestGenerator) selectServiceProvider(ctx context.Context, genera
}
if generatorConfig.BitbucketServer != nil {
providerConfig := generatorConfig.BitbucketServer
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
}
if providerConfig.BasicAuth != nil {
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
if err != nil {

||||
@@ -27,7 +27,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
TargetBranch: "master",
|
||||
@@ -56,7 +56,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
{
|
||||
Number: 2,
|
||||
Branch: "feat/areally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
|
||||
TargetBranch: "feat/anotherreally+long_pull_request_name_to_test_argo_slugification_and_branch_name_shortening_feature",
|
||||
@@ -85,7 +85,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Branch: "a-very-short-sha",
|
||||
TargetBranch: "master",
|
||||
@@ -125,7 +125,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
TargetBranch: "master",
|
||||
@@ -162,7 +162,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
TargetBranch: "master",
|
||||
@@ -273,3 +273,80 @@ func TestPullRequestGetSecretRef(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllowedSCMProviderPullRequest(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
providerConfig *argoprojiov1alpha1.PullRequestGenerator
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "Error Github",
|
||||
providerConfig: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
Github: &argoprojiov1alpha1.PullRequestGeneratorGithub{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Gitlab",
|
||||
providerConfig: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
GitLab: &argoprojiov1alpha1.PullRequestGeneratorGitLab{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Gitea",
|
||||
providerConfig: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
Gitea: &argoprojiov1alpha1.PullRequestGeneratorGitea{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Bitbucket",
|
||||
providerConfig: &argoprojiov1alpha1.PullRequestGenerator{
|
||||
BitbucketServer: &argoprojiov1alpha1.PullRequestGeneratorBitbucketServer{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "failed to select pull request service provider: scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
pullRequestGenerator := NewPullRequestGenerator(nil, SCMAuthProviders{}, "", []string{
|
||||
"github.myorg.com",
|
||||
"gitlab.myorg.com",
|
||||
"gitea.myorg.com",
|
||||
"bitbucket.myorg.com",
|
||||
"azuredevops.myorg.com",
|
||||
})
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
PullRequest: testCaseCopy.providerConfig,
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := pullRequestGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
|
||||
|
||||
assert.Error(t, err, "Must return an error")
|
||||
assert.Equal(t, testCaseCopy.expectedError, err.Error())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,9 +9,12 @@ import (
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

log "github.com/sirupsen/logrus"

"github.com/argoproj/argo-cd/v2/applicationset/services/github_app_auth"
"github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider"
"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/argoproj/argo-cd/v2/common"
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

@@ -26,16 +29,20 @@ type SCMProviderGenerator struct {
// Testing hooks.
overrideProvider scm_provider.SCMProviderService
SCMAuthProviders
scmRootCAPath string
allowedSCMProviders []string
}

type SCMAuthProviders struct {
GitHubApps github_app_auth.Credentials
}

func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders) Generator {
func NewSCMProviderGenerator(client client.Client, providers SCMAuthProviders, scmRootCAPath string, allowedSCMProviders []string) Generator {
return &SCMProviderGenerator{
client: client,
SCMAuthProviders: providers,
client: client,
SCMAuthProviders: providers,
scmRootCAPath: scmRootCAPath,
allowedSCMProviders: allowedSCMProviders,
}
}

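The new constructor arguments carry a root-CA path and an SCM allow-list into the generator. A small sketch of how a caller might wire it up, assuming the signature shown above; the client variable, CA path and host below are placeholders, not values from the patch:

// Sketch of constructing the generator with the new arguments (names are placeholders).
func newSCMGeneratorForExample(k8sClient client.Client) Generator {
	return NewSCMProviderGenerator(
		k8sClient,
		SCMAuthProviders{},                     // no GitHub App credentials in this sketch
		"/etc/ssl/certs/scm-root-ca.pem",       // hypothetical root CA path for self-signed SCM endpoints
		[]string{"https://gitlab.example.com"}, // allow-list; an empty list allows any SCM URL
	)
}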
@@ -58,6 +65,26 @@ func (g *SCMProviderGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.A
return &appSetGenerator.SCMProvider.Template
}

func ScmProviderAllowed(applicationSetInfo *argoprojiov1alpha1.ApplicationSet, url string, allowedScmProviders []string) bool {
if url == "" || len(allowedScmProviders) == 0 {
return true
}

for _, allowedScmProvider := range allowedScmProviders {
if url == allowedScmProvider {
return true
}
}

log.WithFields(log.Fields{
common.SecurityField: common.SecurityMedium,
"applicationset": applicationSetInfo.Name,
"appSetNamespace": applicationSetInfo.Namespace,
}).Debugf("attempted to use disallowed SCM %q", url)

return false
}

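The check accepts any URL when no allow-list is configured, accepts exact matches, and otherwise logs and rejects the provider. A test-style sketch of that behavior, with placeholder URLs:

func TestScmProviderAllowedSketch(t *testing.T) {
	appSet := &argoprojiov1alpha1.ApplicationSet{
		ObjectMeta: metav1.ObjectMeta{Name: "set", Namespace: "argocd"},
	}

	// Empty allow-list: every URL is accepted.
	assert.True(t, ScmProviderAllowed(appSet, "https://gitlab.example.com", nil))

	// An exact match against the configured allow-list is accepted.
	assert.True(t, ScmProviderAllowed(appSet, "https://gitlab.example.com", []string{"https://gitlab.example.com"}))

	// Anything not listed is rejected (and logged at debug level with a security field).
	assert.False(t, ScmProviderAllowed(appSet, "https://rogue.internal", []string{"https://gitlab.example.com"}))
}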
func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, applicationSetInfo *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
|
||||
if appSetGenerator == nil {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
@@ -75,21 +102,30 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
if g.overrideProvider != nil {
|
||||
provider = g.overrideProvider
|
||||
} else if providerConfig.Github != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Github.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Github.API)
|
||||
}
|
||||
var err error
|
||||
provider, err = g.githubProvider(ctx, providerConfig.Github, applicationSetInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scm provider: %w", err)
|
||||
}
|
||||
} else if providerConfig.Gitlab != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Gitlab.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Gitlab.API)
|
||||
}
|
||||
token, err := g.getSecretRef(ctx, providerConfig.Gitlab.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Gitlab token: %v", err)
|
||||
}
|
||||
provider, err = scm_provider.NewGitlabProvider(ctx, providerConfig.Gitlab.Group, token, providerConfig.Gitlab.API, providerConfig.Gitlab.AllBranches, providerConfig.Gitlab.IncludeSubgroups)
|
||||
provider, err = scm_provider.NewGitlabProvider(ctx, providerConfig.Gitlab.Group, token, providerConfig.Gitlab.API, providerConfig.Gitlab.AllBranches, providerConfig.Gitlab.IncludeSubgroups, providerConfig.Gitlab.WillIncludeSharedProjects(), providerConfig.Gitlab.Insecure, g.scmRootCAPath, providerConfig.Gitlab.Topic)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing Gitlab service: %v", err)
|
||||
}
|
||||
} else if providerConfig.Gitea != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.Gitea.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.Gitea.API)
|
||||
}
|
||||
token, err := g.getSecretRef(ctx, providerConfig.Gitea.TokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Gitea token: %v", err)
|
||||
@@ -100,6 +136,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
}
|
||||
} else if providerConfig.BitbucketServer != nil {
|
||||
providerConfig := providerConfig.BitbucketServer
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.API)
|
||||
}
|
||||
var scmError error
|
||||
if providerConfig.BasicAuth != nil {
|
||||
password, err := g.getSecretRef(ctx, providerConfig.BasicAuth.PasswordRef, applicationSetInfo.Namespace)
|
||||
@@ -114,6 +153,9 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
return nil, fmt.Errorf("error initializing Bitbucket Server service: %v", scmError)
|
||||
}
|
||||
} else if providerConfig.AzureDevOps != nil {
|
||||
if !ScmProviderAllowed(applicationSetInfo, providerConfig.AzureDevOps.API, g.allowedSCMProviders) {
|
||||
return nil, fmt.Errorf("scm provider not allowed: %s", providerConfig.AzureDevOps.API)
|
||||
}
|
||||
token, err := g.getSecretRef(ctx, providerConfig.AzureDevOps.AccessTokenRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Azure Devops access token: %v", err)
|
||||
|
||||
@@ -108,26 +108,26 @@ func TestSCMProviderGenerateParams(t *testing.T) {
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"organization": "myorg",
|
||||
"repository": "repo1",
|
||||
"url": "git@github.com:myorg/repo1.git",
|
||||
"branch": "main",
|
||||
"organization": "myorg",
|
||||
"repository": "repo1",
|
||||
"url": "git@github.com:myorg/repo1.git",
|
||||
"branch": "main",
|
||||
"branchNormalized": "main",
|
||||
"sha": "0bc57212c3cbbec69d20b34c507284bd300def5b",
|
||||
"short_sha": "0bc57212",
|
||||
"short_sha_7": "0bc5721",
|
||||
"labels": "prod,staging",
|
||||
"sha": "0bc57212c3cbbec69d20b34c507284bd300def5b",
|
||||
"short_sha": "0bc57212",
|
||||
"short_sha_7": "0bc5721",
|
||||
"labels": "prod,staging",
|
||||
},
|
||||
{
|
||||
"organization": "myorg",
|
||||
"repository": "repo2",
|
||||
"url": "git@github.com:myorg/repo2.git",
|
||||
"branch": "main",
|
||||
"organization": "myorg",
|
||||
"repository": "repo2",
|
||||
"url": "git@github.com:myorg/repo2.git",
|
||||
"branch": "main",
|
||||
"branchNormalized": "main",
|
||||
"sha": "59d0",
|
||||
"short_sha": "59d0",
|
||||
"short_sha_7": "59d0",
|
||||
"labels": "",
|
||||
"sha": "59d0",
|
||||
"short_sha": "59d0",
|
||||
"short_sha_7": "59d0",
|
||||
"labels": "",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -200,3 +200,89 @@ func TestSCMProviderGenerateParams(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllowedSCMProvider(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
providerConfig *argoprojiov1alpha1.SCMProviderGenerator
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "Error Github",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
Github: &argoprojiov1alpha1.SCMProviderGeneratorGithub{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Gitlab",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
Gitlab: &argoprojiov1alpha1.SCMProviderGeneratorGitlab{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Gitea",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
Gitea: &argoprojiov1alpha1.SCMProviderGeneratorGitea{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error Bitbucket",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
BitbucketServer: &argoprojiov1alpha1.SCMProviderGeneratorBitbucketServer{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
{
|
||||
name: "Error AzureDevops",
|
||||
providerConfig: &argoprojiov1alpha1.SCMProviderGenerator{
|
||||
AzureDevOps: &argoprojiov1alpha1.SCMProviderGeneratorAzureDevOps{
|
||||
API: "https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
},
|
||||
expectedError: "scm provider not allowed: https://myservice.mynamespace.svc.cluster.local",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range cases {
|
||||
testCaseCopy := testCase
|
||||
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
scmGenerator := &SCMProviderGenerator{allowedSCMProviders: []string{
|
||||
"github.myorg.com",
|
||||
"gitlab.myorg.com",
|
||||
"gitea.myorg.com",
|
||||
"bitbucket.myorg.com",
|
||||
"azuredevops.myorg.com",
|
||||
}}
|
||||
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "set",
|
||||
},
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
SCMProvider: testCaseCopy.providerConfig,
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := scmGenerator.GenerateParams(&applicationSetInfo.Spec.Generators[0], &applicationSetInfo)
|
||||
|
||||
assert.Error(t, err, "Must return an error")
|
||||
assert.Equal(t, testCaseCopy.expectedError, err.Error())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,11 +26,13 @@ func NewGiteaService(ctx context.Context, token, url, owner, repo string, insecu
if insecure {
cookieJar, _ := cookiejar.New(nil)

tr := http.DefaultTransport.(*http.Transport).Clone()
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}

httpClient = &http.Client{
Jar: cookieJar,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}}
Jar: cookieJar,
Transport: tr,
}
}
client, err := gitea.NewClient(url, gitea.SetToken(token), gitea.SetHTTPClient(httpClient))
if err != nil {

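The hunk above replaces a hand-built &http.Transport{} with a clone of http.DefaultTransport, which keeps the default proxy, timeout and connection-pool settings while overriding only TLS verification. A standalone sketch of the pattern:

package main

import (
	"crypto/tls"
	"net/http"
)

// insecureClient returns an *http.Client that skips TLS verification but
// otherwise behaves like the default client (proxy from environment, dial
// timeouts, connection pooling), because it starts from a clone of
// http.DefaultTransport instead of an empty Transport.
func insecureClient() *http.Client {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	return &http.Client{Transport: tr}
}

func main() {
	_ = insecureClient()
}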
@@ -269,9 +269,9 @@ func TestGetGiteaPRLabelNames(t *testing.T) {
|
||||
{
|
||||
Name: "PR has labels",
|
||||
PullLabels: []*gitea.Label{
|
||||
&gitea.Label{Name: "label1"},
|
||||
&gitea.Label{Name: "label2"},
|
||||
&gitea.Label{Name: "label3"},
|
||||
{Name: "label1"},
|
||||
{Name: "label2"},
|
||||
{Name: "label3"},
|
||||
},
|
||||
ExpectedResult: []string{"label1", "label2", "label3"},
|
||||
},
|
||||
|
||||
@@ -22,9 +22,9 @@ func TestContainLabels(t *testing.T) {
|
||||
Name: "Match labels",
|
||||
Labels: []string{"label1", "label2"},
|
||||
PullLabels: []*github.Label{
|
||||
&github.Label{Name: toPtr("label1")},
|
||||
&github.Label{Name: toPtr("label2")},
|
||||
&github.Label{Name: toPtr("label3")},
|
||||
{Name: toPtr("label1")},
|
||||
{Name: toPtr("label2")},
|
||||
{Name: toPtr("label3")},
|
||||
},
|
||||
Expect: true,
|
||||
},
|
||||
@@ -32,9 +32,9 @@ func TestContainLabels(t *testing.T) {
|
||||
Name: "Not match labels",
|
||||
Labels: []string{"label1", "label4"},
|
||||
PullLabels: []*github.Label{
|
||||
&github.Label{Name: toPtr("label1")},
|
||||
&github.Label{Name: toPtr("label2")},
|
||||
&github.Label{Name: toPtr("label3")},
|
||||
{Name: toPtr("label1")},
|
||||
{Name: toPtr("label2")},
|
||||
{Name: toPtr("label3")},
|
||||
},
|
||||
Expect: false,
|
||||
},
|
||||
@@ -42,9 +42,9 @@ func TestContainLabels(t *testing.T) {
|
||||
Name: "No specify",
|
||||
Labels: []string{},
|
||||
PullLabels: []*github.Label{
|
||||
&github.Label{Name: toPtr("label1")},
|
||||
&github.Label{Name: toPtr("label2")},
|
||||
&github.Label{Name: toPtr("label3")},
|
||||
{Name: toPtr("label1")},
|
||||
{Name: toPtr("label2")},
|
||||
{Name: toPtr("label3")},
|
||||
},
|
||||
Expect: true,
|
||||
},
|
||||
@@ -68,9 +68,9 @@ func TestGetGitHubPRLabelNames(t *testing.T) {
|
||||
{
|
||||
Name: "PR has labels",
|
||||
PullLabels: []*github.Label{
|
||||
&github.Label{Name: toPtr("label1")},
|
||||
&github.Label{Name: toPtr("label2")},
|
||||
&github.Label{Name: toPtr("label3")},
|
||||
{Name: toPtr("label1")},
|
||||
{Name: toPtr("label2")},
|
||||
{Name: toPtr("label3")},
|
||||
},
|
||||
ExpectedResult: []string{"label1", "label2", "label3"},
|
||||
},
|
||||
|
||||
@@ -3,8 +3,11 @@ package pull_request
import (
"context"
"fmt"
"net/http"
"os"

"github.com/argoproj/argo-cd/v2/applicationset/utils"
"github.com/hashicorp/go-retryablehttp"
gitlab "github.com/xanzy/go-gitlab"
)

@@ -17,7 +20,7 @@ type GitLabService struct {

var _ PullRequestService = (*GitLabService)(nil)

func NewGitLabService(ctx context.Context, token, url, project string, labels []string, pullRequestState string) (PullRequestService, error) {
func NewGitLabService(ctx context.Context, token, url, project string, labels []string, pullRequestState string, scmRootCAPath string, insecure bool) (PullRequestService, error) {
var clientOptionFns []gitlab.ClientOptionFunc

// Set a custom Gitlab base URL if one is provided
@@ -29,6 +32,14 @@ func NewGitLabService(ctx context.Context, token, url, project string, labels []
token = os.Getenv("GITLAB_TOKEN")
}

tr := http.DefaultTransport.(*http.Transport).Clone()
tr.TLSClientConfig = utils.GetTlsConfig(scmRootCAPath, insecure)

retryClient := retryablehttp.NewClient()
retryClient.HTTPClient.Transport = tr

clientOptionFns = append(clientOptionFns, gitlab.WithHTTPClient(retryClient.HTTPClient))

client, err := gitlab.NewClient(token, clientOptionFns...)
if err != nil {
return nil, fmt.Errorf("error creating Gitlab client: %v", err)

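NewGitLabService now derives its TLS settings from utils.GetTlsConfig(scmRootCAPath, insecure). A sketch of what a helper of that shape can look like, assuming it only loads an optional root-CA bundle and honours an insecure flag; this is an illustration, not the Argo CD implementation:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"
)

// getTLSConfigSketch builds a *tls.Config from an optional root-CA file and
// an insecure flag. If the file cannot be read or parsed, it falls back to
// the system trust store rather than failing hard.
func getTLSConfigSketch(rootCAPath string, insecure bool) *tls.Config {
	cfg := &tls.Config{InsecureSkipVerify: insecure}
	if rootCAPath == "" {
		return cfg
	}
	pem, err := os.ReadFile(rootCAPath)
	if err != nil {
		log.Printf("could not read root CA %q: %v", rootCAPath, err)
		return cfg
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		log.Printf("no certificates parsed from %q", rootCAPath)
		return cfg
	}
	cfg.RootCAs = pool
	return cfg
}

func main() {
	_ = getTLSConfigSketch("", false)
}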
@@ -34,7 +34,7 @@ func TestGitLabServiceCustomBaseURL(t *testing.T) {
|
||||
writeMRListResponse(t, w)
|
||||
})
|
||||
|
||||
svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", nil, "")
|
||||
svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", nil, "", "", false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = svc.List(context.Background())
|
||||
@@ -53,7 +53,7 @@ func TestGitLabServiceToken(t *testing.T) {
|
||||
writeMRListResponse(t, w)
|
||||
})
|
||||
|
||||
svc, err := NewGitLabService(context.Background(), "token-123", server.URL, "278964", nil, "")
|
||||
svc, err := NewGitLabService(context.Background(), "token-123", server.URL, "278964", nil, "", "", false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = svc.List(context.Background())
|
||||
@@ -72,7 +72,7 @@ func TestList(t *testing.T) {
|
||||
writeMRListResponse(t, w)
|
||||
})
|
||||
|
||||
svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{}, "")
|
||||
svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{}, "", "", false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
prs, err := svc.List(context.Background())
|
||||
@@ -96,7 +96,7 @@ func TestListWithLabels(t *testing.T) {
|
||||
writeMRListResponse(t, w)
|
||||
})
|
||||
|
||||
svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{"feature", "ready"}, "")
|
||||
svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{"feature", "ready"}, "", "", false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = svc.List(context.Background())
|
||||
@@ -115,7 +115,7 @@ func TestListWithState(t *testing.T) {
|
||||
writeMRListResponse(t, w)
|
||||
})
|
||||
|
||||
svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{}, "opened")
|
||||
svc, err := NewGitLabService(context.Background(), "", server.URL, "278964", []string{}, "opened", "", false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = svc.List(context.Background())
|
||||
|
||||
@@ -58,13 +58,13 @@ func (a *argoCDService) GetFiles(ctx context.Context, repoURL string, revision s
|
||||
}
|
||||
closer, client, err := a.repoServerClientSet.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error initialising new repo server client: %w", err)
|
||||
}
|
||||
defer io.Close(closer)
|
||||
|
||||
fileResponse, err := client.GetGitFiles(ctx, fileRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error retrieving Git files: %w", err)
|
||||
}
|
||||
return fileResponse.GetMap(), nil
|
||||
}
|
||||
@@ -83,13 +83,13 @@ func (a *argoCDService) GetDirectories(ctx context.Context, repoURL string, revi
|
||||
|
||||
closer, client, err := a.repoServerClientSet.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error initialising new repo server client: %w", err)
|
||||
}
|
||||
defer io.Close(closer)
|
||||
|
||||
dirResponse, err := client.GetGitDirectories(ctx, dirRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("error retrieving Git Directories: %w", err)
|
||||
}
|
||||
return dirResponse.GetPaths(), nil
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -62,7 +61,7 @@ func TestBitbucketHasRepo(t *testing.T) {
|
||||
}))
|
||||
defer func() { testServer.Close() }()
|
||||
|
||||
os.Setenv("BITBUCKET_API_BASE_URL", testServer.URL)
|
||||
t.Setenv("BITBUCKET_API_BASE_URL", testServer.URL)
|
||||
cases := []struct {
|
||||
name, path, repo, owner, sha string
|
||||
status int
|
||||
@@ -449,7 +448,7 @@ func TestBitbucketListRepos(t *testing.T) {
|
||||
}))
|
||||
defer func() { testServer.Close() }()
|
||||
|
||||
os.Setenv("BITBUCKET_API_BASE_URL", testServer.URL)
|
||||
t.Setenv("BITBUCKET_API_BASE_URL", testServer.URL)
|
||||
cases := []struct {
|
||||
name, proto, owner string
|
||||
hasError, allBranches bool
|
||||
|
||||
@@ -3,6 +3,7 @@ package scm_provider
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
bitbucketv1 "github.com/gfleury/go-bitbucket-v1"
|
||||
@@ -183,8 +184,9 @@ func (b *BitbucketServerProvider) listBranches(repo *Repository) ([]bitbucketv1.
|
||||
|
||||
func (b *BitbucketServerProvider) getDefaultBranch(org string, repo string) (*bitbucketv1.Branch, error) {
|
||||
response, err := b.client.DefaultApi.GetDefaultBranch(org, repo)
|
||||
if response != nil && response.StatusCode == 404 {
|
||||
// There's no default branch i.e. empty repo, not an error
|
||||
// The API will return 404 if a default branch is set but doesn't exist. In case the repo is empty and default branch is unset,
|
||||
// we will get an EOF and a nil response.
|
||||
if (response != nil && response.StatusCode == 404) || (response == nil && err == io.EOF) {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
|
||||
@@ -365,6 +365,28 @@ func TestGetBranchesMissingDefault(t *testing.T) {
|
||||
assert.Empty(t, repos)
|
||||
}
|
||||
|
||||
func TestGetBranchesEmptyRepo(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Empty(t, r.Header.Get("Authorization"))
|
||||
switch r.RequestURI {
|
||||
case "/rest/api/1.0/projects/PROJECT/repos/REPO/branches/default":
|
||||
return
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
provider, err := NewBitbucketServerProviderNoAuth(context.Background(), ts.URL, "PROJECT", false)
|
||||
assert.NoError(t, err)
|
||||
repos, err := provider.GetBranches(context.Background(), &Repository{
|
||||
Organization: "PROJECT",
|
||||
Repository: "REPO",
|
||||
URL: "ssh://git@mycompany.bitbucket.org/PROJECT/REPO.git",
|
||||
Labels: []string{},
|
||||
RepositoryId: 1,
|
||||
})
|
||||
assert.Empty(t, repos)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetBranchesErrorDefaultBranch(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
assert.Empty(t, r.Header.Get("Authorization"))
|
||||
|
||||
@@ -27,11 +27,13 @@ func NewGiteaProvider(ctx context.Context, owner, token, url string, allBranches
|
||||
if insecure {
|
||||
cookieJar, _ := cookiejar.New(nil)
|
||||
|
||||
tr := http.DefaultTransport.(*http.Transport).Clone()
|
||||
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
|
||||
httpClient = &http.Client{
|
||||
Jar: cookieJar,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}}
|
||||
Jar: cookieJar,
|
||||
Transport: tr,
|
||||
}
|
||||
}
|
||||
client, err := gitea.NewClient(url, gitea.SetToken(token), gitea.SetHTTPClient(httpClient))
|
||||
if err != nil {
|
||||
|
||||
@@ -7,38 +7,50 @@ import (
|
||||
"os"
|
||||
pathpkg "path"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
"github.com/hashicorp/go-retryablehttp"
|
||||
"github.com/xanzy/go-gitlab"
|
||||
)
|
||||
|
||||
type GitlabProvider struct {
|
||||
client *gitlab.Client
|
||||
organization string
|
||||
allBranches bool
|
||||
includeSubgroups bool
|
||||
client *gitlab.Client
|
||||
organization string
|
||||
allBranches bool
|
||||
includeSubgroups bool
|
||||
includeSharedProjects bool
|
||||
topic string
|
||||
}
|
||||
|
||||
var _ SCMProviderService = &GitlabProvider{}
|
||||
|
||||
func NewGitlabProvider(ctx context.Context, organization string, token string, url string, allBranches, includeSubgroups bool) (*GitlabProvider, error) {
|
||||
func NewGitlabProvider(ctx context.Context, organization string, token string, url string, allBranches, includeSubgroups, includeSharedProjects, insecure bool, scmRootCAPath, topic string) (*GitlabProvider, error) {
|
||||
// Undocumented environment variable to set a default token, to be used in testing to dodge anonymous rate limits.
|
||||
if token == "" {
|
||||
token = os.Getenv("GITLAB_TOKEN")
|
||||
}
|
||||
var client *gitlab.Client
|
||||
|
||||
tr := http.DefaultTransport.(*http.Transport).Clone()
|
||||
tr.TLSClientConfig = utils.GetTlsConfig(scmRootCAPath, insecure)
|
||||
|
||||
retryClient := retryablehttp.NewClient()
|
||||
retryClient.HTTPClient.Transport = tr
|
||||
|
||||
if url == "" {
|
||||
var err error
|
||||
client, err = gitlab.NewClient(token)
|
||||
client, err = gitlab.NewClient(token, gitlab.WithHTTPClient(retryClient.HTTPClient))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
client, err = gitlab.NewClient(token, gitlab.WithBaseURL(url))
|
||||
client, err = gitlab.NewClient(token, gitlab.WithBaseURL(url), gitlab.WithHTTPClient(retryClient.HTTPClient))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &GitlabProvider{client: client, organization: organization, allBranches: allBranches, includeSubgroups: includeSubgroups}, nil
|
||||
|
||||
return &GitlabProvider{client: client, organization: organization, allBranches: allBranches, includeSubgroups: includeSubgroups, includeSharedProjects: includeSharedProjects, topic: topic}, nil
|
||||
}
|
||||
|
||||
func (g *GitlabProvider) GetBranches(ctx context.Context, repo *Repository) ([]*Repository, error) {
|
||||
@@ -66,7 +78,10 @@ func (g *GitlabProvider) ListRepos(ctx context.Context, cloneProtocol string) ([
|
||||
opt := &gitlab.ListGroupProjectsOptions{
|
||||
ListOptions: gitlab.ListOptions{PerPage: 100},
|
||||
IncludeSubGroups: &g.includeSubgroups,
|
||||
WithShared: &g.includeSharedProjects,
|
||||
Topic: &g.topic,
|
||||
}
|
||||
|
||||
repos := []*Repository{}
|
||||
for {
|
||||
gitlabRepos, resp, err := g.client.Groups.ListGroupProjects(g.organization, opt)
|
||||
|
||||
@@ -19,7 +19,7 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
switch r.RequestURI {
|
||||
case "/api/v4":
|
||||
fmt.Println("here1")
|
||||
case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=false&per_page=100":
|
||||
case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=false&per_page=100", "/api/v4/groups/test-argocd-proton/projects?include_subgroups=false&per_page=100&topic=&with_shared=false":
|
||||
fmt.Println("here")
|
||||
_, err := io.WriteString(w, `[{
|
||||
"id": 27084533,
|
||||
@@ -30,8 +30,12 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
"path_with_namespace": "test-argocd-proton/argocd",
|
||||
"created_at": "2021-06-01T17:30:44.724Z",
|
||||
"default_branch": "master",
|
||||
"tag_list": [],
|
||||
"topics": [],
|
||||
"tag_list": [
|
||||
"test-topic"
|
||||
],
|
||||
"topics": [
|
||||
"test-topic"
|
||||
],
|
||||
"ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
"http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git",
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/argocd",
|
||||
@@ -143,6 +147,650 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=true&per_page=100&topic=&with_shared=false":
|
||||
fmt.Println("here")
|
||||
_, err := io.WriteString(w, `[{
|
||||
"id": 27084533,
|
||||
"description": "",
|
||||
"name": "argocd",
|
||||
"name_with_namespace": "test argocd proton / argocd",
|
||||
"path": "argocd",
|
||||
"path_with_namespace": "test-argocd-proton/argocd",
|
||||
"created_at": "2021-06-01T17:30:44.724Z",
|
||||
"default_branch": "master",
|
||||
"tag_list": [
|
||||
"test-topic",
|
||||
"specific-topic"
|
||||
],
|
||||
"topics": [
|
||||
"test-topic",
|
||||
"specific-topic"
|
||||
],
|
||||
"ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
"http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git",
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/argocd",
|
||||
"readme_url": null,
|
||||
"avatar_url": null,
|
||||
"forks_count": 0,
|
||||
"star_count": 0,
|
||||
"last_activity_at": "2021-06-04T08:19:51.656Z",
|
||||
"namespace": {
|
||||
"id": 12258515,
|
||||
"name": "test argocd proton",
|
||||
"path": "test-argocd-proton",
|
||||
"kind": "gro* Connection #0 to host gitlab.com left intact up ",
|
||||
"full_path ": "test - argocd - proton ",
|
||||
"parent_id ": null,
|
||||
"avatar_url ": null,
|
||||
"web_url ": "https: //gitlab.com/groups/test-argocd-proton"
|
||||
},
|
||||
"container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/argocd",
|
||||
"_links": {
|
||||
"self": "https://gitlab.com/api/v4/projects/27084533",
|
||||
"issues": "https://gitlab.com/api/v4/projects/27084533/issues",
|
||||
"merge_requests": "https://gitlab.com/api/v4/projects/27084533/merge_requests",
|
||||
"repo_branches": "https://gitlab.com/api/v4/projects/27084533/repository/branches",
|
||||
"labels": "https://gitlab.com/api/v4/projects/27084533/labels",
|
||||
"events": "https://gitlab.com/api/v4/projects/27084533/events",
|
||||
"members": "https://gitlab.com/api/v4/projects/27084533/members",
|
||||
"cluster_agents": "https://gitlab.com/api/v4/projects/27084533/cluster_agents"
|
||||
},
|
||||
"packages_enabled": true,
|
||||
"empty_repo": false,
|
||||
"archived": false,
|
||||
"visibility": "public",
|
||||
"resolve_outdated_diff_discussions": false,
|
||||
"container_expiration_policy": {
|
||||
"cadence": "1d",
|
||||
"enabled": false,
|
||||
"keep_n": 10,
|
||||
"older_than": "90d",
|
||||
"name_regex": ".*",
|
||||
"name_regex_keep": null,
|
||||
"next_run_at": "2021-06-02T17:30:44.740Z"
|
||||
},
|
||||
"issues_enabled": true,
|
||||
"merge_requests_enabled": true,
|
||||
"wiki_enabled": true,
|
||||
"jobs_enabled": true,
|
||||
"snippets_enabled": true,
|
||||
"container_registry_enabled": true,
|
||||
"service_desk_enabled": true,
|
||||
"can_create_merge_request_in": false,
|
||||
"issues_access_level": "enabled",
|
||||
"repository_access_level": "enabled",
|
||||
"merge_requests_access_level": "enabled",
|
||||
"forking_access_level": "enabled",
|
||||
"wiki_access_level": "enabled",
|
||||
"builds_access_level": "enabled",
|
||||
"snippets_access_level": "enabled",
|
||||
"pages_access_level": "enabled",
|
||||
"operations_access_level": "enabled",
|
||||
"analytics_access_level": "enabled",
|
||||
"container_registry_access_level": "enabled",
|
||||
"security_and_compliance_access_level": "private",
|
||||
"emails_disabled": null,
|
||||
"shared_runners_enabled": true,
|
||||
"lfs_enabled": true,
|
||||
"creator_id": 2378866,
|
||||
"import_status": "none",
|
||||
"open_issues_count": 0,
|
||||
"ci_default_git_depth": 50,
|
||||
"ci_forward_deployment_enabled": true,
|
||||
"ci_job_token_scope_enabled": false,
|
||||
"public_jobs": true,
|
||||
"build_timeout": 3600,
|
||||
"auto_cancel_pending_pipelines": "enabled",
|
||||
"ci_config_path": "",
|
||||
"shared_with_groups": [],
|
||||
"only_allow_merge_if_pipeline_succeeds": false,
|
||||
"allow_merge_on_skipped_pipeline": null,
|
||||
"restrict_user_defined_variables": false,
|
||||
"request_access_enabled": true,
|
||||
"only_allow_merge_if_all_discussions_are_resolved": false,
|
||||
"remove_source_branch_after_merge": true,
|
||||
"printing_merge_request_link_enabled": true,
|
||||
"merge_method": "merge",
|
||||
"squash_option": "default_off",
|
||||
"suggestion_commit_message": null,
|
||||
"merge_commit_template": null,
|
||||
"squash_commit_template": null,
|
||||
"auto_devops_enabled": false,
|
||||
"auto_devops_deploy_strategy": "continuous",
|
||||
"autoclose_referenced_issues": true,
|
||||
"keep_latest_artifact": true,
|
||||
"runner_token_expiration_interval": null,
|
||||
"approvals_before_merge": 0,
|
||||
"mirror": false,
|
||||
"external_authorization_classification_label": "",
|
||||
"marked_for_deletion_at": null,
|
||||
"marked_for_deletion_on": null,
|
||||
"requirements_enabled": true,
|
||||
"requirements_access_level": "enabled",
|
||||
"security_and_compliance_enabled": false,
|
||||
"compliance_frameworks": [],
|
||||
"issues_template": null,
|
||||
"merge_requests_template": null,
|
||||
"merge_pipelines_enabled": false,
|
||||
"merge_trains_enabled": false
|
||||
},
|
||||
{
|
||||
"id": 27084538,
|
||||
"description": "This is a Project from a Subgroup",
|
||||
"name": "argocd-subgroup",
|
||||
"name_with_namespace": "test argocd proton / subgroup / argocd-subgroup",
|
||||
"path": "argocd-subgroup",
|
||||
"path_with_namespace": "test-argocd-proton/subgroup/argocd-subgroup",
|
||||
"created_at": "2021-06-01T17:30:44.724Z",
|
||||
"default_branch": "master",
|
||||
"tag_list": [
|
||||
"test-topic"
|
||||
],
|
||||
"topics": [
|
||||
"test-topic"
|
||||
],
|
||||
"ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/subgroup/argocd-subgroup.git",
|
||||
"http_url_to_repo": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup.git",
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup",
|
||||
"readme_url": null,
|
||||
"avatar_url": null,
|
||||
"forks_count": 0,
|
||||
"star_count": 0,
|
||||
"last_activity_at": "2021-06-04T08:19:51.656Z",
|
||||
"namespace": {
|
||||
"id": 12258542,
|
||||
"name": "subgroup",
|
||||
"path": "subgroup",
|
||||
"kind": "group ",
|
||||
"full_path ": "test-argocd-proton/subgroup",
|
||||
"parent_id ": 12258515,
|
||||
"avatar_url ": null,
|
||||
"web_url ": "https: //gitlab.com/groups/test-argocd-proton/subgroup"
|
||||
},
|
||||
"container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/subgroup/argocd",
|
||||
"_links": {
|
||||
"self": "https://gitlab.com/api/v4/projects/27084538",
|
||||
"issues": "https://gitlab.com/api/v4/projects/27084538/issues",
|
||||
"merge_requests": "https://gitlab.com/api/v4/projects/27084538/merge_requests",
|
||||
"repo_branches": "https://gitlab.com/api/v4/projects/27084538/repository/branches",
|
||||
"labels": "https://gitlab.com/api/v4/projects/27084538/labels",
|
||||
"events": "https://gitlab.com/api/v4/projects/27084538/events",
|
||||
"members": "https://gitlab.com/api/v4/projects/27084538/members",
|
||||
"cluster_agents": "https://gitlab.com/api/v4/projects/27084538/cluster_agents"
|
||||
},
|
||||
"packages_enabled": true,
|
||||
"empty_repo": false,
|
||||
"archived": false,
|
||||
"visibility": "public",
|
||||
"resolve_outdated_diff_discussions": false,
|
||||
"container_expiration_policy": {
|
||||
"cadence": "1d",
|
||||
"enabled": false,
|
||||
"keep_n": 10,
|
||||
"older_than": "90d",
|
||||
"name_regex": ".*",
|
||||
"name_regex_keep": null,
|
||||
"next_run_at": "2021-06-02T17:30:44.740Z"
|
||||
},
|
||||
"issues_enabled": true,
|
||||
"merge_requests_enabled": true,
|
||||
"wiki_enabled": true,
|
||||
"jobs_enabled": true,
|
||||
"snippets_enabled": true,
|
||||
"container_registry_enabled": true,
|
||||
"service_desk_enabled": true,
|
||||
"can_create_merge_request_in": false,
|
||||
"issues_access_level": "enabled",
|
||||
"repository_access_level": "enabled",
|
||||
"merge_requests_access_level": "enabled",
|
||||
"forking_access_level": "enabled",
|
||||
"wiki_access_level": "enabled",
|
||||
"builds_access_level": "enabled",
|
||||
"snippets_access_level": "enabled",
|
||||
"pages_access_level": "enabled",
|
||||
"operations_access_level": "enabled",
|
||||
"analytics_access_level": "enabled",
|
||||
"container_registry_access_level": "enabled",
|
||||
"security_and_compliance_access_level": "private",
|
||||
"emails_disabled": null,
|
||||
"shared_runners_enabled": true,
|
||||
"lfs_enabled": true,
|
||||
"creator_id": 2378866,
|
||||
"import_status": "none",
|
||||
"open_issues_count": 0,
|
||||
"ci_default_git_depth": 50,
|
||||
"ci_forward_deployment_enabled": true,
|
||||
"ci_job_token_scope_enabled": false,
|
||||
"public_jobs": true,
|
||||
"build_timeout": 3600,
|
||||
"auto_cancel_pending_pipelines": "enabled",
|
||||
"ci_config_path": "",
|
||||
"shared_with_groups": [],
|
||||
"only_allow_merge_if_pipeline_succeeds": false,
|
||||
"allow_merge_on_skipped_pipeline": null,
|
||||
"restrict_user_defined_variables": false,
|
||||
"request_access_enabled": true,
|
||||
"only_allow_merge_if_all_discussions_are_resolved": false,
|
||||
"remove_source_branch_after_merge": true,
|
||||
"printing_merge_request_link_enabled": true,
|
||||
"merge_method": "merge",
|
||||
"squash_option": "default_off",
|
||||
"suggestion_commit_message": null,
|
||||
"merge_commit_template": null,
|
||||
"squash_commit_template": null,
|
||||
"auto_devops_enabled": false,
|
||||
"auto_devops_deploy_strategy": "continuous",
|
||||
"autoclose_referenced_issues": true,
|
||||
"keep_latest_artifact": true,
|
||||
"runner_token_expiration_interval": null,
|
||||
"approvals_before_merge": 0,
|
||||
"mirror": false,
|
||||
"external_authorization_classification_label": "",
|
||||
"marked_for_deletion_at": null,
|
||||
"marked_for_deletion_on": null,
|
||||
"requirements_enabled": true,
|
||||
"requirements_access_level": "enabled",
|
||||
"security_and_compliance_enabled": false,
|
||||
"compliance_frameworks": [],
|
||||
"issues_template": null,
|
||||
"merge_requests_template": null,
|
||||
"merge_pipelines_enabled": false,
|
||||
"merge_trains_enabled": false
|
||||
}
|
||||
]`)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=false&per_page=100&topic=specific-topic&with_shared=false":
|
||||
fmt.Println("here")
|
||||
_, err := io.WriteString(w, `[{
|
||||
"id": 27084533,
|
||||
"description": "",
|
||||
"name": "argocd",
|
||||
"name_with_namespace": "test argocd proton / argocd",
|
||||
"path": "argocd",
|
||||
"path_with_namespace": "test-argocd-proton/argocd",
|
||||
"created_at": "2021-06-01T17:30:44.724Z",
|
||||
"default_branch": "master",
|
||||
"tag_list": [
|
||||
"test-topic",
|
||||
"specific-topic"
|
||||
],
|
||||
"topics": [
|
||||
"test-topic",
|
||||
"specific-topic"
|
||||
],
|
||||
"ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
"http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git",
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/argocd",
|
||||
"readme_url": null,
|
||||
"avatar_url": null,
|
||||
"forks_count": 0,
|
||||
"star_count": 0,
|
||||
"last_activity_at": "2021-06-04T08:19:51.656Z",
|
||||
"namespace": {
|
||||
"id": 12258515,
|
||||
"name": "test argocd proton",
|
||||
"path": "test-argocd-proton",
|
||||
"kind": "gro* Connection #0 to host gitlab.com left intact up ",
|
||||
"full_path ": "test - argocd - proton ",
|
||||
"parent_id ": null,
|
||||
"avatar_url ": null,
|
||||
"web_url ": "https: //gitlab.com/groups/test-argocd-proton"
|
||||
},
|
||||
"container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/argocd",
|
||||
"_links": {
|
||||
"self": "https://gitlab.com/api/v4/projects/27084533",
|
||||
"issues": "https://gitlab.com/api/v4/projects/27084533/issues",
|
||||
"merge_requests": "https://gitlab.com/api/v4/projects/27084533/merge_requests",
|
||||
"repo_branches": "https://gitlab.com/api/v4/projects/27084533/repository/branches",
|
||||
"labels": "https://gitlab.com/api/v4/projects/27084533/labels",
|
||||
"events": "https://gitlab.com/api/v4/projects/27084533/events",
|
||||
"members": "https://gitlab.com/api/v4/projects/27084533/members",
|
||||
"cluster_agents": "https://gitlab.com/api/v4/projects/27084533/cluster_agents"
|
||||
},
|
||||
"packages_enabled": true,
|
||||
"empty_repo": false,
|
||||
"archived": false,
|
||||
"visibility": "public",
|
||||
"resolve_outdated_diff_discussions": false,
|
||||
"container_expiration_policy": {
|
||||
"cadence": "1d",
|
||||
"enabled": false,
|
||||
"keep_n": 10,
|
||||
"older_than": "90d",
|
||||
"name_regex": ".*",
|
||||
"name_regex_keep": null,
|
||||
"next_run_at": "2021-06-02T17:30:44.740Z"
|
||||
},
|
||||
"issues_enabled": true,
|
||||
"merge_requests_enabled": true,
|
||||
"wiki_enabled": true,
|
||||
"jobs_enabled": true,
|
||||
"snippets_enabled": true,
|
||||
"container_registry_enabled": true,
|
||||
"service_desk_enabled": true,
|
||||
"can_create_merge_request_in": false,
|
||||
"issues_access_level": "enabled",
|
||||
"repository_access_level": "enabled",
|
||||
"merge_requests_access_level": "enabled",
|
||||
"forking_access_level": "enabled",
|
||||
"wiki_access_level": "enabled",
|
||||
"builds_access_level": "enabled",
|
||||
"snippets_access_level": "enabled",
|
||||
"pages_access_level": "enabled",
|
||||
"operations_access_level": "enabled",
|
||||
"analytics_access_level": "enabled",
|
||||
"container_registry_access_level": "enabled",
|
||||
"security_and_compliance_access_level": "private",
|
||||
"emails_disabled": null,
|
||||
"shared_runners_enabled": true,
|
||||
"lfs_enabled": true,
|
||||
"creator_id": 2378866,
|
||||
"import_status": "none",
|
||||
"open_issues_count": 0,
|
||||
"ci_default_git_depth": 50,
|
||||
"ci_forward_deployment_enabled": true,
|
||||
"ci_job_token_scope_enabled": false,
|
||||
"public_jobs": true,
|
||||
"build_timeout": 3600,
|
||||
"auto_cancel_pending_pipelines": "enabled",
|
||||
"ci_config_path": "",
|
||||
"shared_with_groups": [],
|
||||
"only_allow_merge_if_pipeline_succeeds": false,
|
||||
"allow_merge_on_skipped_pipeline": null,
|
||||
"restrict_user_defined_variables": false,
|
||||
"request_access_enabled": true,
|
||||
"only_allow_merge_if_all_discussions_are_resolved": false,
|
||||
"remove_source_branch_after_merge": true,
|
||||
"printing_merge_request_link_enabled": true,
|
||||
"merge_method": "merge",
|
||||
"squash_option": "default_off",
|
||||
"suggestion_commit_message": null,
|
||||
"merge_commit_template": null,
|
||||
"squash_commit_template": null,
|
||||
"auto_devops_enabled": false,
|
||||
"auto_devops_deploy_strategy": "continuous",
|
||||
"autoclose_referenced_issues": true,
|
||||
"keep_latest_artifact": true,
|
||||
"runner_token_expiration_interval": null,
|
||||
"approvals_before_merge": 0,
|
||||
"mirror": false,
|
||||
"external_authorization_classification_label": "",
|
||||
"marked_for_deletion_at": null,
|
||||
"marked_for_deletion_on": null,
|
||||
"requirements_enabled": true,
|
||||
"requirements_access_level": "enabled",
|
||||
"security_and_compliance_enabled": false,
|
||||
"compliance_frameworks": [],
|
||||
"issues_template": null,
|
||||
"merge_requests_template": null,
|
||||
"merge_pipelines_enabled": false,
|
||||
"merge_trains_enabled": false
|
||||
}
|
||||
]`)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/groups/test-argocd-proton/projects?include_subgroups=true&per_page=100&topic=&with_shared=true":
|
||||
fmt.Println("here")
|
||||
_, err := io.WriteString(w, `[{
|
||||
"id": 27084533,
|
||||
"description": "",
|
||||
"name": "argocd",
|
||||
"name_with_namespace": "test argocd proton / argocd",
|
||||
"path": "argocd",
|
||||
"path_with_namespace": "test-argocd-proton/argocd",
|
||||
"created_at": "2021-06-01T17:30:44.724Z",
|
||||
"default_branch": "master",
|
||||
"tag_list": [
|
||||
"test-topic"
|
||||
],
|
||||
"topics": [
|
||||
"test-topic"
|
||||
],
|
||||
"ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
"http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git",
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/argocd",
|
||||
"readme_url": null,
|
||||
"avatar_url": null,
|
||||
"forks_count": 0,
|
||||
"star_count": 0,
|
||||
"last_activity_at": "2021-06-04T08:19:51.656Z",
|
||||
"namespace": {
|
||||
"id": 12258515,
|
||||
"name": "test argocd proton",
|
||||
"path": "test-argocd-proton",
|
||||
"kind": "gro* Connection #0 to host gitlab.com left intact up ",
|
||||
"full_path ": "test - argocd - proton ",
|
||||
"parent_id ": null,
|
||||
"avatar_url ": null,
|
||||
"web_url ": "https: //gitlab.com/groups/test-argocd-proton"
|
||||
},
|
||||
"container_registry_image_prefix": "registry.gitlab.com/test-argocd-proton/argocd",
|
||||
"_links": {
|
||||
"self": "https://gitlab.com/api/v4/projects/27084533",
|
||||
"issues": "https://gitlab.com/api/v4/projects/27084533/issues",
|
||||
"merge_requests": "https://gitlab.com/api/v4/projects/27084533/merge_requests",
|
||||
"repo_branches": "https://gitlab.com/api/v4/projects/27084533/repository/branches",
|
||||
"labels": "https://gitlab.com/api/v4/projects/27084533/labels",
|
||||
"events": "https://gitlab.com/api/v4/projects/27084533/events",
|
||||
"members": "https://gitlab.com/api/v4/projects/27084533/members",
|
||||
"cluster_agents": "https://gitlab.com/api/v4/projects/27084533/cluster_agents"
|
||||
},
|
||||
"packages_enabled": true,
|
||||
"empty_repo": false,
|
||||
"archived": false,
|
||||
"visibility": "public",
|
||||
"resolve_outdated_diff_discussions": false,
|
||||
"container_expiration_policy": {
|
||||
"cadence": "1d",
|
||||
"enabled": false,
|
||||
"keep_n": 10,
|
||||
"older_than": "90d",
|
||||
"name_regex": ".*",
|
||||
"name_regex_keep": null,
|
||||
"next_run_at": "2021-06-02T17:30:44.740Z"
|
||||
},
|
||||
"issues_enabled": true,
|
||||
"merge_requests_enabled": true,
|
||||
"wiki_enabled": true,
|
||||
"jobs_enabled": true,
|
||||
"snippets_enabled": true,
|
||||
"container_registry_enabled": true,
|
||||
"service_desk_enabled": true,
|
||||
"can_create_merge_request_in": false,
|
||||
"issues_access_level": "enabled",
|
||||
"repository_access_level": "enabled",
|
||||
"merge_requests_access_level": "enabled",
|
||||
"forking_access_level": "enabled",
|
||||
"wiki_access_level": "enabled",
|
||||
"builds_access_level": "enabled",
|
||||
"snippets_access_level": "enabled",
|
||||
"pages_access_level": "enabled",
|
||||
"operations_access_level": "enabled",
|
||||
"analytics_access_level": "enabled",
|
||||
"container_registry_access_level": "enabled",
|
||||
"security_and_compliance_access_level": "private",
|
||||
"emails_disabled": null,
|
||||
"shared_runners_enabled": true,
|
||||
"lfs_enabled": true,
|
||||
"creator_id": 2378866,
|
||||
"import_status": "none",
|
||||
"open_issues_count": 0,
|
||||
"ci_default_git_depth": 50,
|
||||
"ci_forward_deployment_enabled": true,
|
||||
"ci_job_token_scope_enabled": false,
|
||||
"public_jobs": true,
|
||||
"build_timeout": 3600,
|
||||
"auto_cancel_pending_pipelines": "enabled",
|
||||
"ci_config_path": "",
|
||||
"shared_with_groups": [],
|
||||
"only_allow_merge_if_pipeline_succeeds": false,
|
||||
"allow_merge_on_skipped_pipeline": null,
|
||||
"restrict_user_defined_variables": false,
|
||||
"request_access_enabled": true,
|
||||
"only_allow_merge_if_all_discussions_are_resolved": false,
|
||||
"remove_source_branch_after_merge": true,
|
||||
"printing_merge_request_link_enabled": true,
|
||||
"merge_method": "merge",
|
||||
"squash_option": "default_off",
|
||||
"suggestion_commit_message": null,
|
||||
"merge_commit_template": null,
|
||||
"squash_commit_template": null,
|
||||
"auto_devops_enabled": false,
|
||||
"auto_devops_deploy_strategy": "continuous",
|
||||
"autoclose_referenced_issues": true,
|
||||
"keep_latest_artifact": true,
|
||||
"runner_token_expiration_interval": null,
|
||||
"approvals_before_merge": 0,
|
||||
"mirror": false,
|
||||
"external_authorization_classification_label": "",
|
||||
"marked_for_deletion_at": null,
|
||||
"marked_for_deletion_on": null,
|
||||
"requirements_enabled": true,
|
||||
"requirements_access_level": "enabled",
|
||||
"security_and_compliance_enabled": false,
|
||||
"compliance_frameworks": [],
|
||||
"issues_template": null,
|
||||
"merge_requests_template": null,
|
||||
"merge_pipelines_enabled": false,
|
||||
"merge_trains_enabled": false
|
||||
},
|
||||
{
|
||||
"id": 27084534,
|
||||
"description": "This is a Shared Project",
|
||||
"name": "shared-argocd",
|
||||
"name_with_namespace": "shared project to test argocd proton / argocd",
|
||||
"path": "shared-argocd",
|
||||
"path_with_namespace": "test-shared-argocd-proton/shared-argocd",
|
||||
"created_at": "2021-06-11T17:30:44.724Z",
|
||||
"default_branch": "master",
|
||||
"tag_list": [
|
||||
"test-topic"
|
||||
],
|
||||
"topics": [
|
||||
"test-topic"
|
||||
],
|
||||
"ssh_url_to_repo": "git@gitlab.com:test-shared-argocd-proton/shared-argocd.git",
|
||||
"http_url_to_repo": "https://gitlab.com/test-shared-argocd-proton/shared-argocd.git",
|
||||
"web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd",
|
||||
"readme_url": null,
|
||||
"avatar_url": null,
|
||||
"forks_count": 0,
|
||||
"star_count": 0,
|
||||
"last_activity_at": "2021-06-04T08:19:51.656Z",
|
||||
"namespace": {
|
||||
"id": 12258518,
|
||||
"name": "test shared argocd proton",
|
||||
"path": "test-shared-argocd-proton",
|
||||
"kind": "group",
|
||||
"full_path ": "test-shared-argocd-proton",
|
||||
"parent_id ": null,
|
||||
"avatar_url ": null,
|
||||
"web_url ": "https: //gitlab.com/groups/test-shared-argocd-proton"
|
||||
},
|
||||
"container_registry_image_prefix": "registry.gitlab.com/test-shared-argocd-proton/shared-argocd",
|
||||
"_links": {
|
||||
"self": "https://gitlab.com/api/v4/projects/27084534",
|
||||
"issues": "https://gitlab.com/api/v4/projects/27084534/issues",
|
||||
"merge_requests": "https://gitlab.com/api/v4/projects/27084534/merge_requests",
|
||||
"repo_branches": "https://gitlab.com/api/v4/projects/27084534/repository/branches",
|
||||
"labels": "https://gitlab.com/api/v4/projects/27084534/labels",
|
||||
"events": "https://gitlab.com/api/v4/projects/27084534/events",
|
||||
"members": "https://gitlab.com/api/v4/projects/27084534/members",
|
||||
"cluster_agents": "https://gitlab.com/api/v4/projects/27084534/cluster_agents"
|
||||
},
|
||||
"packages_enabled": true,
|
||||
"empty_repo": false,
|
||||
"archived": false,
|
||||
"visibility": "public",
|
||||
"resolve_outdated_diff_discussions": false,
|
||||
"container_expiration_policy": {
|
||||
"cadence": "1d",
|
||||
"enabled": false,
|
||||
"keep_n": 10,
|
||||
"older_than": "90d",
|
||||
"name_regex": ".*",
|
||||
"name_regex_keep": null,
|
||||
"next_run_at": "2021-06-12T17:30:44.740Z"
|
||||
},
|
||||
"issues_enabled": true,
|
||||
"merge_requests_enabled": true,
|
||||
"wiki_enabled": true,
|
||||
"jobs_enabled": true,
|
||||
"snippets_enabled": true,
|
||||
"container_registry_enabled": true,
|
||||
"service_desk_enabled": true,
|
||||
"can_create_merge_request_in": false,
|
||||
"issues_access_level": "enabled",
|
||||
"repository_access_level": "enabled",
|
||||
"merge_requests_access_level": "enabled",
|
||||
"forking_access_level": "enabled",
|
||||
"wiki_access_level": "enabled",
|
||||
"builds_access_level": "enabled",
|
||||
"snippets_access_level": "enabled",
|
||||
"pages_access_level": "enabled",
|
||||
"operations_access_level": "enabled",
|
||||
"analytics_access_level": "enabled",
|
||||
"container_registry_access_level": "enabled",
|
||||
"security_and_compliance_access_level": "private",
|
||||
"emails_disabled": null,
|
||||
"shared_runners_enabled": true,
|
||||
"lfs_enabled": true,
|
||||
"creator_id": 2378866,
|
||||
"import_status": "none",
|
||||
"open_issues_count": 0,
|
||||
"ci_default_git_depth": 50,
|
||||
"ci_forward_deployment_enabled": true,
|
||||
"ci_job_token_scope_enabled": false,
|
||||
"public_jobs": true,
|
||||
"build_timeout": 3600,
|
||||
"auto_cancel_pending_pipelines": "enabled",
|
||||
"ci_config_path": "",
|
||||
"shared_with_groups": [
|
||||
{
|
||||
"group_id": 12258515,
|
||||
"group_name": "test-argocd-proton",
|
||||
"group_full_path": "test-shared-argocd-proton",
|
||||
"group_access_level": 30,
|
||||
"expires_at": null
|
||||
}
|
||||
],
|
||||
"only_allow_merge_if_pipeline_succeeds": false,
|
||||
"allow_merge_on_skipped_pipeline": null,
|
||||
"restrict_user_defined_variables": false,
|
||||
"request_access_enabled": true,
|
||||
"only_allow_merge_if_all_discussions_are_resolved": false,
|
||||
"remove_source_branch_after_merge": true,
|
||||
"printing_merge_request_link_enabled": true,
|
||||
"merge_method": "merge",
|
||||
"squash_option": "default_off",
|
||||
"suggestion_commit_message": null,
|
||||
"merge_commit_template": null,
|
||||
"squash_commit_template": null,
|
||||
"auto_devops_enabled": false,
|
||||
"auto_devops_deploy_strategy": "continuous",
|
||||
"autoclose_referenced_issues": true,
|
||||
"keep_latest_artifact": true,
|
||||
"runner_token_expiration_interval": null,
|
||||
"approvals_before_merge": 0,
|
||||
"mirror": false,
|
||||
"external_authorization_classification_label": "",
|
||||
"marked_for_deletion_at": null,
|
||||
"marked_for_deletion_on": null,
|
||||
"requirements_enabled": true,
|
||||
"requirements_access_level": "enabled",
|
||||
"security_and_compliance_enabled": false,
|
||||
"compliance_frameworks": [],
|
||||
"issues_template": null,
|
||||
"merge_requests_template": null,
|
||||
"merge_pipelines_enabled": false,
|
||||
"merge_trains_enabled": false
|
||||
}]`)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/projects/27084533/repository/branches/master":
|
||||
fmt.Println("returning")
|
||||
_, err := io.WriteString(w, `{
|
||||
@@ -229,6 +877,116 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/projects/27084534/repository/branches?per_page=100":
|
||||
_, err := io.WriteString(w, `[{
|
||||
"name": "master",
|
||||
"commit": {
|
||||
"id": "8898d7999fc99dd0fd578650b58b244fc63f6b53",
|
||||
"short_id": "8898d799",
|
||||
"created_at": "2021-06-04T08:24:44.000+00:00",
|
||||
"parent_ids": null,
|
||||
"title": "Merge branch 'pipeline-1317911429' into 'master'",
|
||||
"message": "Merge branch 'pipeline-1317911429' into 'master'",
|
||||
"author_name": "Martin Vozník",
|
||||
"author_email": "martin@voznik.cz",
|
||||
"authored_date": "2021-06-04T08:24:44.000+00:00",
|
||||
"committer_name": "Martin Vozník",
|
||||
"committer_email": "martin@voznik.cz",
|
||||
"committed_date": "2021-06-04T08:24:44.000+00:00",
|
||||
"trailers": null,
|
||||
"web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd/-/commit/8898d7999fc99dd0fd578650b58b244fc63f6b53"
|
||||
},
|
||||
"merged": false,
|
||||
"protected": true,
|
||||
"developers_can_push": false,
|
||||
"developers_can_merge": false,
|
||||
"can_push": false,
|
||||
"default": true,
|
||||
"web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd/-/tree/master"
|
||||
}, {
|
||||
"name": "pipeline-2310077506",
|
||||
"commit": {
|
||||
"id": "0f92540e5f396ba960adea4ed0aa905baf3f73d1",
|
||||
"short_id": "0f92540e",
|
||||
"created_at": "2021-06-01T18:39:59.000+00:00",
|
||||
"parent_ids": null,
|
||||
"title": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1",
|
||||
"message": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1",
|
||||
"author_name": "ci-test-app",
|
||||
"author_email": "mvoznik+cicd@protonmail.com",
|
||||
"authored_date": "2021-06-01T18:39:59.000+00:00",
|
||||
"committer_name": "ci-test-app",
|
||||
"committer_email": "mvoznik+cicd@protonmail.com",
|
||||
"committed_date": "2021-06-01T18:39:59.000+00:00",
|
||||
"trailers": null,
|
||||
"web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd/-/commit/0f92540e5f396ba960adea4ed0aa905baf3f73d1"
|
||||
},
|
||||
"merged": false,
|
||||
"protected": false,
|
||||
"developers_can_push": false,
|
||||
"developers_can_merge": false,
|
||||
"can_push": false,
|
||||
"default": false,
|
||||
"web_url": "https://gitlab.com/test-shared-argocd-proton/shared-argocd/-/tree/pipeline-1310077506"
|
||||
}]`)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/projects/27084538/repository/branches?per_page=100":
|
||||
_, err := io.WriteString(w, `[{
|
||||
"name": "master",
|
||||
"commit": {
|
||||
"id": "8898d7999fc99dd0fd578650b58b244fc63f6b58",
|
||||
"short_id": "8898d801",
|
||||
"created_at": "2021-06-04T08:24:44.000+00:00",
|
||||
"parent_ids": null,
|
||||
"title": "Merge branch 'pipeline-1317911429' into 'master'",
|
||||
"message": "Merge branch 'pipeline-1317911429' into 'master'",
|
||||
"author_name": "Martin Vozník",
|
||||
"author_email": "martin@voznik.cz",
|
||||
"authored_date": "2021-06-04T08:24:44.000+00:00",
|
||||
"committer_name": "Martin Vozník",
|
||||
"committer_email": "martin@voznik.cz",
|
||||
"committed_date": "2021-06-04T08:24:44.000+00:00",
|
||||
"trailers": null,
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup/-/commit/8898d7999fc99dd0fd578650b58b244fc63f6b53"
|
||||
},
|
||||
"merged": false,
|
||||
"protected": true,
|
||||
"developers_can_push": false,
|
||||
"developers_can_merge": false,
|
||||
"can_push": false,
|
||||
"default": true,
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup/-/tree/master"
|
||||
}, {
|
||||
"name": "pipeline-2310077506",
|
||||
"commit": {
|
||||
"id": "0f92540e5f396ba960adea4ed0aa905baf3f73d1",
|
||||
"short_id": "0f92540e",
|
||||
"created_at": "2021-06-01T18:39:59.000+00:00",
|
||||
"parent_ids": null,
|
||||
"title": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1",
|
||||
"message": "[testapp-ci] manifests/demo/test-app.yaml: release v1.0.1",
|
||||
"author_name": "ci-test-app",
|
||||
"author_email": "mvoznik+cicd@protonmail.com",
|
||||
"authored_date": "2021-06-01T18:39:59.000+00:00",
|
||||
"committer_name": "ci-test-app",
|
||||
"committer_email": "mvoznik+cicd@protonmail.com",
|
||||
"committed_date": "2021-06-01T18:39:59.000+00:00",
|
||||
"trailers": null,
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup/-/commit/0f92540e5f396ba960adea4ed0aa905baf3f73d1"
|
||||
},
|
||||
"merged": false,
|
||||
"protected": false,
|
||||
"developers_can_push": false,
|
||||
"developers_can_merge": false,
|
||||
"can_push": false,
|
||||
"default": false,
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/subgroup/argocd-subgroup/-/tree/pipeline-1310077506"
|
||||
}]`)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/projects/test-argocd-proton%2Fargocd":
|
||||
fmt.Println("auct")
|
||||
_, err := io.WriteString(w, `{
|
||||
@@ -240,8 +998,12 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
"path_with_namespace": "test-argocd-proton/argocd",
|
||||
"created_at": "2021-06-01T17:30:44.724Z",
|
||||
"default_branch": "master",
|
||||
"tag_list": [],
|
||||
"topics": [],
|
||||
"tag_list": [
|
||||
"test-topic"
|
||||
],
|
||||
"topics": [
|
||||
"test-topic"
|
||||
],
|
||||
"ssh_url_to_repo": "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
"http_url_to_repo": "https://gitlab.com/test-argocd-proton/argocd.git",
|
||||
"web_url": "https://gitlab.com/test-argocd-proton/argocd",
|
||||
@@ -286,10 +1048,10 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
}
|
||||
func TestGitlabListRepos(t *testing.T) {
|
||||
cases := []struct {
|
||||
name, proto, url string
|
||||
hasError, allBranches, includeSubgroups bool
|
||||
branches []string
|
||||
filters []v1alpha1.SCMProviderGeneratorFilter
|
||||
name, proto, url, topic string
|
||||
hasError, allBranches, includeSubgroups, includeSharedProjects, insecure bool
|
||||
branches []string
|
||||
filters []v1alpha1.SCMProviderGeneratorFilter
|
||||
}{
|
||||
{
|
||||
name: "blank protocol",
|
||||
@@ -317,32 +1079,66 @@ func TestGitlabListRepos(t *testing.T) {
|
||||
url: "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
branches: []string{"master"},
|
||||
},
|
||||
{
|
||||
name: "all subgroups",
|
||||
allBranches: true,
|
||||
url: "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
branches: []string{"master"},
|
||||
includeSharedProjects: false,
|
||||
includeSubgroups: true,
|
||||
},
|
||||
{
|
||||
name: "all subgroups and shared projects",
|
||||
allBranches: true,
|
||||
url: "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
branches: []string{"master"},
|
||||
includeSharedProjects: true,
|
||||
includeSubgroups: true,
|
||||
},
|
||||
{
|
||||
name: "specific topic",
|
||||
allBranches: true,
|
||||
url: "git@gitlab.com:test-argocd-proton/argocd.git",
|
||||
branches: []string{"master"},
|
||||
includeSubgroups: false,
|
||||
topic: "specific-topic",
|
||||
},
|
||||
}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gitlabMockHandler(t)(w, r)
|
||||
}))
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
provider, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, c.allBranches, c.includeSubgroups)
|
||||
provider, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, c.allBranches, c.includeSubgroups, c.includeSharedProjects, c.insecure, "", c.topic)
|
||||
rawRepos, err := ListRepos(context.Background(), provider, c.filters, c.proto)
|
||||
if c.hasError {
|
||||
assert.NotNil(t, err)
|
||||
} else {
|
||||
assert.Nil(t, err)
|
||||
// Just check that this one project shows up. Not a great test but better thing nothing?
|
||||
// Just check that this one project shows up. Not a great test but better than nothing?
|
||||
repos := []*Repository{}
|
||||
uniqueRepos := map[string]int{}
|
||||
branches := []string{}
|
||||
for _, r := range rawRepos {
|
||||
if r.Repository == "argocd" {
|
||||
repos = append(repos, r)
|
||||
branches = append(branches, r.Branch)
|
||||
}
|
||||
uniqueRepos[r.Repository]++
|
||||
}
|
||||
assert.NotEmpty(t, repos)
|
||||
assert.Equal(t, c.url, repos[0].URL)
|
||||
for _, b := range c.branches {
|
||||
assert.Contains(t, branches, b)
|
||||
}
|
||||
// In case of listing subgroups, validate the number of returned projects
|
||||
if c.includeSubgroups || c.includeSharedProjects {
|
||||
assert.Equal(t, 2, len(uniqueRepos))
|
||||
}
|
||||
// In case we filter on the topic, ensure we got only one repo returned
|
||||
if c.topic != "" {
|
||||
assert.Equal(t, 1, len(uniqueRepos))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
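For context, a minimal sketch of how a caller might use the widened constructor exercised above. The meaning of the extra arguments (shared projects, insecure TLS, SCM root CA path, topic) is inferred from the test table and argument order; the parameter names are assumptions, not taken from the provider source.

func listGitlabRepos(ctx context.Context, token string) ([]*Repository, error) {
	// Argument order inferred from the tests above: allBranches, includeSubgroups,
	// includeSharedProjects, insecure, scmRootCAPath, topic.
	provider, err := NewGitlabProvider(ctx, "test-argocd-proton", token, "https://gitlab.com",
		false, true, true, false, "", "test-topic")
	if err != nil {
		return nil, err
	}
	return ListRepos(ctx, provider, nil, "ssh")
}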
|
||||
@@ -352,7 +1148,7 @@ func TestGitlabHasPath(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gitlabMockHandler(t)(w, r)
|
||||
}))
|
||||
host, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, false, true)
|
||||
host, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, false, true, true, false, "", "")
|
||||
repo := &Repository{
|
||||
Organization: "test-argocd-proton",
|
||||
Repository: "argocd",
|
||||
@@ -398,7 +1194,7 @@ func TestGitlabGetBranches(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gitlabMockHandler(t)(w, r)
|
||||
}))
|
||||
host, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, false, true)
|
||||
host, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, false, true, true, false, "", "")
|
||||
|
||||
repo := &Repository{
|
||||
RepositoryId: 27084533,
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
kubetesting "k8s.io/client-go/testing"
|
||||
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/test/e2e/fixture/applicationsets/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -69,7 +68,7 @@ func createClusterSecret(secretName string, clusterName string, clusterServer st
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secretName,
|
||||
Namespace: utils.ArgoCDNamespace,
|
||||
Namespace: fakeNamespace,
|
||||
Labels: map[string]string{
|
||||
ArgoCDSecretTypeLabel: ArgoCDSecretTypeCluster,
|
||||
},
|
||||
@@ -111,7 +110,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
objects = append(objects, secret)
|
||||
kubeclientset := fake.NewSimpleClientset(objects...)
|
||||
|
||||
appCond := ValidateDestination(context.Background(), &dest, kubeclientset, utils.ArgoCDNamespace)
|
||||
appCond := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
assert.Nil(t, appCond)
|
||||
assert.Equal(t, "https://127.0.0.1:6443", dest.Server)
|
||||
assert.True(t, dest.IsServerInferred())
|
||||
@@ -124,7 +123,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
Namespace: "default",
|
||||
}
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, nil, utils.ArgoCDNamespace)
|
||||
err := ValidateDestination(context.Background(), &dest, nil, fakeNamespace)
|
||||
assert.Equal(t, "application destination can't have both name and server defined: minikube https://127.0.0.1:6443", err.Error())
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
@@ -139,7 +138,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
return true, nil, fmt.Errorf("an error occurred")
|
||||
})
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, utils.ArgoCDNamespace)
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
assert.Equal(t, "unable to find destination server: an error occurred", err.Error())
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
@@ -154,7 +153,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
objects = append(objects, secret)
|
||||
kubeclientset := fake.NewSimpleClientset(objects...)
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, utils.ArgoCDNamespace)
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
assert.Equal(t, "unable to find destination server: there are no clusters with this name: minikube", err.Error())
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
@@ -171,7 +170,7 @@ func TestValidateDestination(t *testing.T) {
|
||||
objects = append(objects, secret, secret2)
|
||||
kubeclientset := fake.NewSimpleClientset(objects...)
|
||||
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, utils.ArgoCDNamespace)
|
||||
err := ValidateDestination(context.Background(), &dest, kubeclientset, fakeNamespace)
|
||||
assert.Equal(t, "unable to find destination server: there are 2 clusters with the same name: [https://127.0.0.1:2443 https://127.0.0.1:8443]", err.Error())
|
||||
assert.False(t, dest.IsServerInferred())
|
||||
})
|
||||
|
||||
@@ -2,18 +2,24 @@ package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
)
|
||||
|
||||
// CreateOrUpdate overrides "sigs.k8s.io/controller-runtime" function
|
||||
@@ -29,7 +35,7 @@ import (
|
||||
// The MutateFn is called regardless of creating or updating an object.
|
||||
//
|
||||
// It returns the executed operation and an error.
|
||||
func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
|
||||
func CreateOrUpdate(ctx context.Context, logCtx *log.Entry, c client.Client, ignoreAppDifferences argov1alpha1.ApplicationSetIgnoreDifferences, obj *argov1alpha1.Application, f controllerutil.MutateFn) (controllerutil.OperationResult, error) {
|
||||
|
||||
key := client.ObjectKeyFromObject(obj)
|
||||
if err := c.Get(ctx, key, obj); err != nil {
|
||||
@@ -45,11 +51,24 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f c
|
||||
return controllerutil.OperationResultCreated, nil
|
||||
}
|
||||
|
||||
existing := obj.DeepCopyObject()
|
||||
normalizedLive := obj.DeepCopy()
|
||||
|
||||
// Mutate the live object to match the desired state.
|
||||
if err := mutate(f, key, obj); err != nil {
|
||||
return controllerutil.OperationResultNone, err
|
||||
}
|
||||
|
||||
// Apply ignoreApplicationDifferences rules to remove ignored fields from both the live and the desired state. This
|
||||
// prevents those differences from appearing in the diff and therefore in the patch.
|
||||
err := applyIgnoreDifferences(ignoreAppDifferences, normalizedLive, obj)
|
||||
if err != nil {
|
||||
return controllerutil.OperationResultNone, fmt.Errorf("failed to apply ignore differences: %w", err)
|
||||
}
|
||||
|
||||
// Normalize to avoid diffing on unimportant differences.
|
||||
normalizedLive.Spec = *argo.NormalizeApplicationSpec(&normalizedLive.Spec)
|
||||
obj.Spec = *argo.NormalizeApplicationSpec(&obj.Spec)
|
||||
|
||||
equality := conversion.EqualitiesOrDie(
|
||||
func(a, b resource.Quantity) bool {
|
||||
// Ignore formatting, only care that numeric value stayed the same.
|
||||
@@ -75,16 +94,34 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f c
|
||||
},
|
||||
)
|
||||
|
||||
if equality.DeepEqual(existing, obj) {
|
||||
if equality.DeepEqual(normalizedLive, obj) {
|
||||
return controllerutil.OperationResultNone, nil
|
||||
}
|
||||
|
||||
if err := c.Update(ctx, obj); err != nil {
|
||||
patch := client.MergeFrom(normalizedLive)
|
||||
if log.IsLevelEnabled(log.DebugLevel) {
|
||||
LogPatch(logCtx, patch, obj)
|
||||
}
|
||||
if err := c.Patch(ctx, obj, patch); err != nil {
|
||||
return controllerutil.OperationResultNone, err
|
||||
}
|
||||
return controllerutil.OperationResultUpdated, nil
|
||||
}
|
||||
|
||||
func LogPatch(logCtx *log.Entry, patch client.Patch, obj *argov1alpha1.Application) {
|
||||
patchBytes, err := patch.Data(obj)
|
||||
if err != nil {
|
||||
logCtx.Errorf("failed to generate patch: %v", err)
|
||||
}
|
||||
// Get the patch as a plain object so it is easier to work with in json logs.
|
||||
var patchObj map[string]interface{}
|
||||
err = json.Unmarshal(patchBytes, &patchObj)
|
||||
if err != nil {
|
||||
logCtx.Errorf("failed to unmarshal patch: %v", err)
|
||||
}
|
||||
logCtx.WithField("patch", patchObj).Debug("patching application")
|
||||
}
|
||||
|
||||
// mutate wraps a MutateFn and applies validation to its result
|
||||
func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object) error {
|
||||
if err := f(); err != nil {
|
||||
@@ -95,3 +132,71 @@ func mutate(f controllerutil.MutateFn, key client.ObjectKey, obj client.Object)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyIgnoreDifferences applies the ignore differences rules to the found application. It modifies the applications in place.
|
||||
func applyIgnoreDifferences(applicationSetIgnoreDifferences argov1alpha1.ApplicationSetIgnoreDifferences, found *argov1alpha1.Application, generatedApp *argov1alpha1.Application) error {
|
||||
if len(applicationSetIgnoreDifferences) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
generatedAppCopy := generatedApp.DeepCopy()
|
||||
diffConfig, err := argodiff.NewDiffConfigBuilder().
|
||||
WithDiffSettings(applicationSetIgnoreDifferences.ToApplicationIgnoreDifferences(), nil, false).
|
||||
WithNoCache().
|
||||
Build()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to build diff config: %w", err)
|
||||
}
|
||||
unstructuredFound, err := appToUnstructured(found)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
|
||||
}
|
||||
unstructuredGenerated, err := appToUnstructured(generatedApp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert found application to unstructured: %w", err)
|
||||
}
|
||||
result, err := argodiff.Normalize([]*unstructured.Unstructured{unstructuredFound}, []*unstructured.Unstructured{unstructuredGenerated}, diffConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to normalize application spec: %w", err)
|
||||
}
|
||||
if len(result.Lives) != 1 {
|
||||
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Lives))
|
||||
}
|
||||
foundJsonNormalized, err := json.Marshal(result.Lives[0].Object)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
|
||||
}
|
||||
foundNormalized := &argov1alpha1.Application{}
|
||||
err = json.Unmarshal(foundJsonNormalized, &foundNormalized)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal normalized app to json: %w", err)
|
||||
}
|
||||
if len(result.Targets) != 1 {
|
||||
return fmt.Errorf("expected 1 normalized application, got %d", len(result.Targets))
|
||||
}
|
||||
foundNormalized.DeepCopyInto(found)
|
||||
generatedJsonNormalized, err := json.Marshal(result.Targets[0].Object)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal normalized app to json: %w", err)
|
||||
}
|
||||
generatedAppNormalized := &argov1alpha1.Application{}
|
||||
err = json.Unmarshal(generatedJsonNormalized, &generatedAppNormalized)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal normalized app json to structured app: %w", err)
|
||||
}
|
||||
generatedAppNormalized.DeepCopyInto(generatedApp)
|
||||
// Prohibit jq queries from mutating silly things.
|
||||
generatedApp.TypeMeta = generatedAppCopy.TypeMeta
|
||||
generatedApp.Name = generatedAppCopy.Name
|
||||
generatedApp.Namespace = generatedAppCopy.Namespace
|
||||
generatedApp.Operation = generatedAppCopy.Operation
|
||||
return nil
|
||||
}
|
||||
|
||||
func appToUnstructured(app client.Object) (*unstructured.Unstructured, error) {
|
||||
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(app)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert app object to unstructured: %w", err)
|
||||
}
|
||||
return &unstructured.Unstructured{Object: u}, nil
|
||||
}
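A hedged sketch of a caller for the reworked CreateOrUpdate above, building the ignore rules the same way the new tests do. The surrounding client, logger, and application are assumed to come from the ApplicationSet controller; the helper name is hypothetical.

func reconcileApp(ctx context.Context, logCtx *log.Entry, c client.Client, generatedApp *argov1alpha1.Application) error {
	ignoreDiffs := argov1alpha1.ApplicationSetIgnoreDifferences{
		{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
		{JSONPointers: []string{"/spec/source/plugin"}},
	}
	op, err := CreateOrUpdate(ctx, logCtx, c, ignoreDiffs, generatedApp, func() error {
		// Mutate generatedApp here to the state rendered from the ApplicationSet template.
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to create or update Application: %w", err)
	}
	logCtx.Debugf("application %s reconciled: %s", generatedApp.Name, op)
	return nil
}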
|
||||
|
||||
234 applicationset/utils/createOrUpdate_test.go (new file)
@@ -0,0 +1,234 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v3"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
func Test_applyIgnoreDifferences(t *testing.T) {
|
||||
appMeta := metav1.TypeMeta{
|
||||
APIVersion: v1alpha1.ApplicationSchemaGroupVersionKind.GroupVersion().String(),
|
||||
Kind: v1alpha1.ApplicationSchemaGroupVersionKind.Kind,
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
ignoreDifferences v1alpha1.ApplicationSetIgnoreDifferences
|
||||
foundApp string
|
||||
generatedApp string
|
||||
expectedApp string
|
||||
}{
|
||||
{
|
||||
name: "empty ignoreDifferences",
|
||||
foundApp: `
|
||||
spec: {}`,
|
||||
generatedApp: `
|
||||
spec: {}`,
|
||||
expectedApp: `
|
||||
spec: {}`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
|
||||
name: "ignore target revision with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{".spec.source.targetRevision"}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
source:
|
||||
targetRevision: foo`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
source:
|
||||
targetRevision: bar`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
source:
|
||||
targetRevision: foo`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1103593714
|
||||
name: "ignore helm parameter with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.spec.source.helm.parameters | select(.name == "image.tag")`}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
source:
|
||||
helm:
|
||||
parameters:
|
||||
- name: image.tag
|
||||
value: test
|
||||
- name: another
|
||||
value: value`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
source:
|
||||
helm:
|
||||
parameters:
|
||||
- name: image.tag
|
||||
value: v1.0.0
|
||||
- name: another
|
||||
value: value`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
source:
|
||||
helm:
|
||||
parameters:
|
||||
- name: image.tag
|
||||
value: test
|
||||
- name: another
|
||||
value: value`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1191138278
|
||||
name: "ignore auto-sync in appset when it's not in the cluster with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
retry:
|
||||
limit: 5`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
retry:
|
||||
limit: 5`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
retry:
|
||||
limit: 5`,
|
||||
},
|
||||
{
|
||||
name: "ignore auto-sync in the cluster when it's not in the appset with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{".spec.syncPolicy.automated"}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
retry:
|
||||
limit: 5`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
retry:
|
||||
limit: 5`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
syncPolicy:
|
||||
automated:
|
||||
selfHeal: true
|
||||
retry:
|
||||
limit: 5`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1420656537
|
||||
name: "ignore a one-off annotation with jq",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.metadata.annotations | select(.["foo.bar"] == "baz")`}},
|
||||
},
|
||||
foundApp: `
|
||||
metadata:
|
||||
annotations:
|
||||
foo.bar: baz
|
||||
some.other: annotation`,
|
||||
generatedApp: `
|
||||
metadata:
|
||||
annotations:
|
||||
some.other: annotation`,
|
||||
expectedApp: `
|
||||
metadata:
|
||||
annotations:
|
||||
foo.bar: baz
|
||||
some.other: annotation`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/issues/9101#issuecomment-1515672638
|
||||
name: "ignore the source.plugin field with a json pointer",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JSONPointers: []string{"/spec/source/plugin"}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
source:
|
||||
plugin:
|
||||
parameters:
|
||||
- name: url
|
||||
string: https://example.com`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
source:
|
||||
plugin:
|
||||
parameters:
|
||||
- name: url
|
||||
string: https://example.com/wrong`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
source:
|
||||
plugin:
|
||||
parameters:
|
||||
- name: url
|
||||
string: https://example.com`,
|
||||
},
|
||||
{
|
||||
// For this use case: https://github.com/argoproj/argo-cd/pull/14743#issuecomment-1761954799
|
||||
name: "ignore parameters added to a multi-source app in the cluster",
|
||||
ignoreDifferences: v1alpha1.ApplicationSetIgnoreDifferences{
|
||||
{JQPathExpressions: []string{`.spec.sources[] | select(.repoURL | contains("test-repo")).helm.parameters`}},
|
||||
},
|
||||
foundApp: `
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/test-org/test-repo
|
||||
helm:
|
||||
parameters:
|
||||
- name: test
|
||||
value: hi`,
|
||||
generatedApp: `
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/test-org/test-repo`,
|
||||
expectedApp: `
|
||||
spec:
|
||||
sources:
|
||||
- repoURL: https://git.example.com/test-org/test-repo
|
||||
helm:
|
||||
parameters:
|
||||
- name: test
|
||||
value: hi`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
foundApp := v1alpha1.Application{TypeMeta: appMeta}
|
||||
err := yaml.Unmarshal([]byte(tc.foundApp), &foundApp)
|
||||
require.NoError(t, err, tc.foundApp)
|
||||
generatedApp := v1alpha1.Application{TypeMeta: appMeta}
|
||||
err = yaml.Unmarshal([]byte(tc.generatedApp), &generatedApp)
|
||||
require.NoError(t, err, tc.generatedApp)
|
||||
err = applyIgnoreDifferences(tc.ignoreDifferences, &foundApp, &generatedApp)
|
||||
require.NoError(t, err)
|
||||
yamlFound, err := yaml.Marshal(tc.foundApp)
|
||||
require.NoError(t, err)
|
||||
yamlExpected, err := yaml.Marshal(tc.expectedApp)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(yamlExpected), string(yamlFound))
|
||||
})
|
||||
}
|
||||
}
|
||||
71 applicationset/utils/template_functions.go (new file)
@@ -0,0 +1,71 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
// SanitizeName sanitizes the name in accordance with the below rules
|
||||
// 1. contain no more than 253 characters
|
||||
// 2. contain only lowercase alphanumeric characters, '-' or '.'
|
||||
// 3. start and end with an alphanumeric character
|
||||
func SanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
}
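A small hypothetical test (not part of the diff) showing what the three rules above mean in practice:

func Test_SanitizeName_examples(t *testing.T) {
	// "_", "/" and "!" are invalid DNS characters and are replaced with "-";
	// the trailing "-" is then trimmed.
	assert.Equal(t, "my-app-name", SanitizeName("My_App/Name!"))
	// Names longer than 253 characters are truncated.
	assert.Len(t, SanitizeName(strings.Repeat("a", 300)), 253)
}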
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
|
||||
// toYAML takes an interface, marshals it to yaml, and returns a string. It will
|
||||
// always return a string, even on marshal error (empty string).
|
||||
//
|
||||
// This is designed to be called from a template.
|
||||
func toYAML(v interface{}) (string, error) {
|
||||
data, err := yaml.Marshal(v)
|
||||
if err != nil {
|
||||
// Swallow errors inside of a template.
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSuffix(string(data), "\n"), nil
|
||||
}
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
|
||||
// fromYAML converts a YAML document into a map[string]interface{}.
|
||||
//
|
||||
// This is not a general-purpose YAML parser, and will not parse all valid
|
||||
// YAML documents. Additionally, because its intended use is within templates
|
||||
// it tolerates errors. It will insert the returned error message string into
|
||||
// m["Error"] in the returned map.
|
||||
func fromYAML(str string) (map[string]interface{}, error) {
|
||||
m := map[string]interface{}{}
|
||||
|
||||
if err := yaml.Unmarshal([]byte(str), &m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// This has been copied from helm and may be removed as soon as it is retrofitted in sprig
|
||||
// fromYAMLArray converts a YAML array into a []interface{}.
|
||||
//
|
||||
// This is not a general-purpose YAML parser, and will not parse all valid
|
||||
// YAML documents. Additionally, because its intended use is within templates
|
||||
// it tolerates errors. It will insert the returned error message string as
|
||||
// the first and only item in the returned array.
|
||||
func fromYAMLArray(str string) ([]interface{}, error) {
|
||||
a := []interface{}{}
|
||||
|
||||
if err := yaml.Unmarshal([]byte(str), &a); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return a, nil
|
||||
}
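A hypothetical test, mirroring the cases exercised later in this diff, showing how the three helpers behave once they are registered as toYaml, fromYaml and fromYamlArray in the template function map:

func Test_yamlHelpers_examples(t *testing.T) {
	out, err := toYAML(map[string]interface{}{"foo": map[string]interface{}{"str": "Hello world"}})
	require.NoError(t, err)
	assert.Equal(t, "foo:\n  str: Hello world", out)

	m, err := fromYAML("hello: world")
	require.NoError(t, err)
	assert.Equal(t, "world", m["hello"])

	a, err := fromYAMLArray("- hello world\n- bonjour tout le monde")
	require.NoError(t, err)
	assert.Equal(t, "bonjour tout le monde", a[1])
}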
|
||||
@@ -2,9 +2,12 @@ package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
@@ -14,6 +17,7 @@ import (
|
||||
|
||||
"github.com/Masterminds/sprig/v3"
|
||||
"github.com/valyala/fasttemplate"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
@@ -28,6 +32,9 @@ func init() {
|
||||
delete(sprigFuncMap, "expandenv")
|
||||
delete(sprigFuncMap, "getHostByName")
|
||||
sprigFuncMap["normalize"] = SanitizeName
|
||||
sprigFuncMap["toYaml"] = toYAML
|
||||
sprigFuncMap["fromYaml"] = fromYAML
|
||||
sprigFuncMap["fromYamlArray"] = fromYAMLArray
|
||||
}
|
||||
|
||||
type Renderer interface {
|
||||
@@ -48,6 +55,22 @@ func copyUnexported(copy, original reflect.Value) {
|
||||
copyValueIntoUnexported(copy, unexported)
|
||||
}
|
||||
|
||||
func IsJSONStr(str string) bool {
|
||||
str = strings.TrimSpace(str)
|
||||
return len(str) > 0 && str[0] == '{'
|
||||
}
|
||||
|
||||
func ConvertYAMLToJSON(str string) (string, error) {
|
||||
if !IsJSONStr(str) {
|
||||
jsonStr, err := yaml.YAMLToJSON([]byte(str))
|
||||
if err != nil {
|
||||
return str, err
|
||||
}
|
||||
return string(jsonStr), nil
|
||||
}
|
||||
return str, nil
|
||||
}
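A hypothetical test sketching the two inputs the templating code cares about: strings that already look like JSON are returned untouched, while YAML is converted to JSON before it is unmarshalled.

func Test_ConvertYAMLToJSON_examples(t *testing.T) {
	// Detected as JSON by IsJSONStr, so the string is passed through unchanged.
	out, err := ConvertYAMLToJSON(`{"some":{"string":"value"}}`)
	require.NoError(t, err)
	assert.Equal(t, `{"some":{"string":"value"}}`, out)

	// YAML input is converted to JSON.
	out, err = ConvertYAMLToJSON("some:\n  string: value")
	require.NoError(t, err)
	assert.JSONEq(t, `{"some":{"string":"value"}}`, out)
}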
|
||||
|
||||
// This function is in charge of searching all String fields of the object recursively and apply templating
|
||||
// thanks to https://gist.github.com/randallmlough/1fd78ec8a1034916ca52281e3b886dc7
|
||||
func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[string]interface{}, useGoTemplate bool, goTemplateOptions []string) error {
|
||||
@@ -71,6 +94,7 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
}
|
||||
// Unwrap the newly created pointer
|
||||
if err := r.deeplyReplace(copy.Elem(), originalValue, replaceMap, useGoTemplate, goTemplateOptions); err != nil {
|
||||
// Not wrapping the error, since this is a recursive function. Avoids excessively long error messages.
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -83,11 +107,19 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
originalValue := original.Elem()
|
||||
// Create a new object. Now new gives us a pointer, but we want the value it
|
||||
// points to, so we have to call Elem() to unwrap it
|
||||
copyValue := reflect.New(originalValue.Type()).Elem()
|
||||
if err := r.deeplyReplace(copyValue, originalValue, replaceMap, useGoTemplate, goTemplateOptions); err != nil {
|
||||
return err
|
||||
|
||||
if originalValue.IsValid() {
|
||||
reflectType := originalValue.Type()
|
||||
|
||||
reflectValue := reflect.New(reflectType)
|
||||
|
||||
copyValue := reflectValue.Elem()
|
||||
if err := r.deeplyReplace(copyValue, originalValue, replaceMap, useGoTemplate, goTemplateOptions); err != nil {
|
||||
// Not wrapping the error, since this is a recursive function. Avoids excessively long error messages.
|
||||
return err
|
||||
}
|
||||
copy.Set(copyValue)
|
||||
}
|
||||
copy.Set(copyValue)
|
||||
|
||||
// If it is a struct we translate each field
|
||||
case reflect.Struct:
|
||||
@@ -96,10 +128,14 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
// specific case time
|
||||
if currentType == "time.Time" {
|
||||
copy.Field(i).Set(original.Field(i))
|
||||
} else if currentType == "Raw.k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" {
|
||||
} else if currentType == "Raw.k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" || currentType == "Raw.k8s.io/apimachinery/pkg/runtime" {
|
||||
var unmarshaled interface{}
|
||||
originalBytes := original.Field(i).Bytes()
|
||||
err := json.Unmarshal(originalBytes, &unmarshaled)
|
||||
convertedToJson, err := ConvertYAMLToJSON(string(originalBytes))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while converting template to json %q: %w", convertedToJson, err)
|
||||
}
|
||||
err = json.Unmarshal([]byte(convertedToJson), &unmarshaled)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal JSON field: %w", err)
|
||||
}
|
||||
@@ -116,6 +152,7 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
}
|
||||
copy.Field(i).Set(reflect.ValueOf(data))
|
||||
} else if err := r.deeplyReplace(copy.Field(i), original.Field(i), replaceMap, useGoTemplate, goTemplateOptions); err != nil {
|
||||
// Not wrapping the error, since this is a recursive function. Avoids excessively long error messages.
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -130,6 +167,7 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
|
||||
for i := 0; i < original.Len(); i += 1 {
|
||||
if err := r.deeplyReplace(copy.Index(i), original.Index(i), replaceMap, useGoTemplate, goTemplateOptions); err != nil {
|
||||
// Not wrapping the error, since this is a recursive function. Avoids excessively long error messages.
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -150,6 +188,7 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
copyValue := reflect.New(originalValue.Type()).Elem()
|
||||
|
||||
if err := r.deeplyReplace(copyValue, originalValue, replaceMap, useGoTemplate, goTemplateOptions); err != nil {
|
||||
// Not wrapping the error, since this is a recursive function. Avoids excessively long error messages.
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -157,6 +196,7 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
if key.Kind() == reflect.String {
|
||||
templatedKey, err := r.Replace(key.String(), replaceMap, useGoTemplate, goTemplateOptions)
|
||||
if err != nil {
|
||||
// Not wrapping the error, since this is a recursive function. Avoids excessively long error messages.
|
||||
return err
|
||||
}
|
||||
key = reflect.ValueOf(templatedKey)
|
||||
@@ -171,6 +211,7 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
strToTemplate := original.String()
|
||||
templated, err := r.Replace(strToTemplate, replaceMap, useGoTemplate, goTemplateOptions)
|
||||
if err != nil {
|
||||
// Not wrapping the error, since this is a recursive function. Avoids excessively long error messages.
|
||||
return err
|
||||
}
|
||||
if copy.CanSet() {
|
||||
@@ -280,7 +321,10 @@ func (r *Render) Replace(tmpl string, replaceMap map[string]interface{}, useGoTe
|
||||
return tmpl, nil
|
||||
}
|
||||
|
||||
fstTmpl := fasttemplate.New(tmpl, "{{", "}}")
|
||||
fstTmpl, err := fasttemplate.NewTemplate(tmpl, "{{", "}}")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid template: %w", err)
|
||||
}
|
||||
replacedTmpl := fstTmpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {
|
||||
trimmedTag := strings.TrimSpace(tag)
|
||||
replacement, ok := replaceMap[trimmedTag].(string)
|
||||
@@ -390,19 +434,37 @@ func NormalizeBitbucketBasePath(basePath string) string {
|
||||
return basePath
|
||||
}
|
||||
|
||||
// SanitizeName sanitizes the name in accordance with the below rules
|
||||
// 1. contain no more than 253 characters
|
||||
// 2. contain only lowercase alphanumeric characters, '-' or '.'
|
||||
// 3. start and end with an alphanumeric character
|
||||
func SanitizeName(name string) string {
|
||||
invalidDNSNameChars := regexp.MustCompile("[^-a-z0-9.]")
|
||||
maxDNSNameLength := 253
|
||||
func getTlsConfigWithCACert(scmRootCAPath string) *tls.Config {
|
||||
|
||||
name = strings.ToLower(name)
|
||||
name = invalidDNSNameChars.ReplaceAllString(name, "-")
|
||||
if len(name) > maxDNSNameLength {
|
||||
name = name[:maxDNSNameLength]
|
||||
tlsConfig := &tls.Config{}
|
||||
|
||||
if scmRootCAPath != "" {
|
||||
_, err := os.Stat(scmRootCAPath)
|
||||
if os.IsNotExist(err) {
|
||||
log.Errorf("scmRootCAPath '%s' specified does not exist: %s", scmRootCAPath, err)
|
||||
return tlsConfig
|
||||
}
|
||||
rootCA, err := os.ReadFile(scmRootCAPath)
|
||||
if err != nil {
|
||||
log.Errorf("error reading certificate from file '%s', proceeding without custom rootCA : %s", scmRootCAPath, err)
|
||||
return tlsConfig
|
||||
}
|
||||
certPool := x509.NewCertPool()
|
||||
ok := certPool.AppendCertsFromPEM([]byte(rootCA))
|
||||
if !ok {
|
||||
log.Errorf("failed to append certificates from PEM: proceeding without custom rootCA")
|
||||
} else {
|
||||
tlsConfig.RootCAs = certPool
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Trim(name, "-.")
|
||||
return tlsConfig
|
||||
}
|
||||
|
||||
func GetTlsConfig(scmRootCAPath string, insecure bool) *tls.Config {
|
||||
tlsConfig := getTlsConfigWithCACert(scmRootCAPath)
|
||||
|
||||
if insecure {
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
}
|
||||
return tlsConfig
|
||||
}
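A minimal sketch of wiring GetTlsConfig into an HTTP client; the helper name is hypothetical and the CA path would come from the generator's configuration.

func newSCMHTTPClient(scmRootCAPath string, insecure bool) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: GetTlsConfig(scmRootCAPath, insecure),
		},
	}
}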
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -195,6 +199,113 @@ func TestRenderTemplateParams(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestRenderHelmValuesObjectJson(t *testing.T) {
|
||||
|
||||
params := map[string]interface{}{
|
||||
"test": "Hello world",
|
||||
}
|
||||
|
||||
application := &argoappsv1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{"annotation-key": "annotation-value", "annotation-key2": "annotation-value2"},
|
||||
Labels: map[string]string{"label-key": "label-value", "label-key2": "label-value2"},
|
||||
CreationTimestamp: metav1.NewTime(time.Now()),
|
||||
UID: types.UID("d546da12-06b7-4f9a-8ea2-3adb16a20e2b"),
|
||||
Name: "application-one",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: argoappsv1.ApplicationSpec{
|
||||
Source: &argoappsv1.ApplicationSource{
|
||||
Path: "",
|
||||
RepoURL: "",
|
||||
TargetRevision: "",
|
||||
Chart: "",
|
||||
Helm: &argoappsv1.ApplicationSourceHelm{
|
||||
ValuesObject: &runtime.RawExtension{
|
||||
Raw: []byte(`{
|
||||
"some": {
|
||||
"string": "{{.test}}"
|
||||
}
|
||||
}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
Destination: argoappsv1.ApplicationDestination{
|
||||
Server: "",
|
||||
Namespace: "",
|
||||
Name: "",
|
||||
},
|
||||
Project: "",
|
||||
},
|
||||
}
|
||||
|
||||
// Render the cloned application, into a new application
|
||||
render := Render{}
|
||||
newApplication, err := render.RenderTemplateParams(application, nil, params, true, []string{})
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, newApplication)
|
||||
|
||||
var unmarshaled interface{}
|
||||
err = json.Unmarshal(newApplication.Spec.Source.Helm.ValuesObject.Raw, &unmarshaled)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, unmarshaled.(map[string]interface{})["some"].(map[string]interface{})["string"], "Hello world")
|
||||
|
||||
}
|
||||
|
||||
func TestRenderHelmValuesObjectYaml(t *testing.T) {
|
||||
|
||||
params := map[string]interface{}{
|
||||
"test": "Hello world",
|
||||
}
|
||||
|
||||
application := &argoappsv1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{"annotation-key": "annotation-value", "annotation-key2": "annotation-value2"},
|
||||
Labels: map[string]string{"label-key": "label-value", "label-key2": "label-value2"},
|
||||
CreationTimestamp: metav1.NewTime(time.Now()),
|
||||
UID: types.UID("d546da12-06b7-4f9a-8ea2-3adb16a20e2b"),
|
||||
Name: "application-one",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: argoappsv1.ApplicationSpec{
|
||||
Source: &argoappsv1.ApplicationSource{
|
||||
Path: "",
|
||||
RepoURL: "",
|
||||
TargetRevision: "",
|
||||
Chart: "",
|
||||
Helm: &argoappsv1.ApplicationSourceHelm{
|
||||
ValuesObject: &runtime.RawExtension{
|
||||
Raw: []byte(`some:
|
||||
string: "{{.test}}"`),
|
||||
},
|
||||
},
|
||||
},
|
||||
Destination: argoappsv1.ApplicationDestination{
|
||||
Server: "",
|
||||
Namespace: "",
|
||||
Name: "",
|
||||
},
|
||||
Project: "",
|
||||
},
|
||||
}
|
||||
|
||||
// Render the cloned application, into a new application
|
||||
render := Render{}
|
||||
newApplication, err := render.RenderTemplateParams(application, nil, params, true, []string{})
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, newApplication)
|
||||
|
||||
var unmarshaled interface{}
|
||||
err = json.Unmarshal(newApplication.Spec.Source.Helm.ValuesObject.Raw, &unmarshaled)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, unmarshaled.(map[string]interface{})["some"].(map[string]interface{})["string"], "Hello world")
|
||||
|
||||
}
|
||||
|
||||
func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
|
||||
// Believe it or not, this is actually less complex than the equivalent solution using reflection
|
||||
@@ -444,6 +555,64 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
templateOptions: []string{"missingkey=error"},
|
||||
errorMessage: `failed to execute go template --> {{.doesnotexist}} <--: template: :1:6: executing "" at <.doesnotexist>: map has no entry for key "doesnotexist"`,
|
||||
},
|
||||
{
|
||||
name: "toYaml",
|
||||
fieldVal: `{{ toYaml . | indent 2 }}`,
|
||||
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
|
||||
params: map[string]interface{}{
|
||||
"foo": map[string]interface{}{
|
||||
"bar": map[string]interface{}{
|
||||
"bool": true,
|
||||
"number": 2,
|
||||
"str": "Hello world",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "toYaml Error",
|
||||
fieldVal: `{{ toYaml . | indent 2 }}`,
|
||||
expectedVal: " foo:\n bar:\n bool: true\n number: 2\n str: Hello world",
|
||||
errorMessage: "failed to execute go template {{ toYaml . | indent 2 }}: template: :1:3: executing \"\" at <toYaml .>: error calling toYaml: error marshaling into JSON: json: unsupported type: func(*string)",
|
||||
params: map[string]interface{}{
|
||||
"foo": func(test *string) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYaml",
|
||||
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
|
||||
expectedVal: "world",
|
||||
params: map[string]interface{}{
|
||||
"value": "hello: world",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYaml error",
|
||||
fieldVal: `{{ get (fromYaml .value) "hello" }}`,
|
||||
expectedVal: "world",
|
||||
errorMessage: "failed to execute go template {{ get (fromYaml .value) \"hello\" }}: template: :1:8: executing \"\" at <fromYaml .value>: error calling fromYaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type map[string]interface {}",
|
||||
params: map[string]interface{}{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYamlArray",
|
||||
fieldVal: `{{ fromYamlArray .value | last }}`,
|
||||
expectedVal: "bonjour tout le monde",
|
||||
params: map[string]interface{}{
|
||||
"value": "- hello world\n- bonjour tout le monde",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fromYamlArray error",
|
||||
fieldVal: `{{ fromYamlArray .value | last }}`,
|
||||
expectedVal: "bonjour tout le monde",
|
||||
errorMessage: "failed to execute go template {{ fromYamlArray .value | last }}: template: :1:3: executing \"\" at <fromYamlArray .value>: error calling fromYamlArray: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type []interface {}",
|
||||
params: map[string]interface{}{
|
||||
"value": "non\n compliant\n yaml",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -556,6 +725,14 @@ func TestRenderTemplateKeys(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func Test_Render_Replace_no_panic_on_missing_closing_brace(t *testing.T) {
|
||||
r := &Render{}
|
||||
assert.NotPanics(t, func() {
|
||||
_, err := r.Replace("{{properly.closed}} {{improperly.closed}", nil, false, []string{})
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRenderTemplateParamsFinalizers(t *testing.T) {
|
||||
|
||||
emptyApplication := &argoappsv1.Application{
|
||||
@@ -1065,3 +1242,92 @@ func TestNormalizeBitbucketBasePath(t *testing.T) {
|
||||
assert.Equal(t, c.expectedBasePath, result, c.testName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetTLSConfig(t *testing.T) {
|
||||
// certParsed, err := tls.X509KeyPair(test.Cert, test.PrivateKey)
|
||||
// require.NoError(t, err)
|
||||
|
||||
temppath := t.TempDir()
|
||||
cert := `
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFvTCCA6WgAwIBAgIUGrTmW3qc39zqnE08e3qNDhUkeWswDQYJKoZIhvcNAQEL
|
||||
BQAwbjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAklMMRAwDgYDVQQHDAdDaGljYWdv
|
||||
MRQwEgYDVQQKDAtDYXBvbmUsIEluYzEQMA4GA1UECwwHU3BlY09wczEYMBYGA1UE
|
||||
AwwPZm9vLmV4YW1wbGUuY29tMB4XDTE5MDcwODEzNTUwNVoXDTIwMDcwNzEzNTUw
|
||||
NVowbjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAklMMRAwDgYDVQQHDAdDaGljYWdv
|
||||
MRQwEgYDVQQKDAtDYXBvbmUsIEluYzEQMA4GA1UECwwHU3BlY09wczEYMBYGA1UE
|
||||
AwwPZm9vLmV4YW1wbGUuY29tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
|
||||
AgEA3csSO13w7qQXKeSLNcpeuAe6wAjXYbRkRl6ariqzTEDcFTKmy2QiXJTKoEGn
|
||||
bvwxq0T91var7rxY88SGL/qi8Zmo0tVSR0XvKSKcghFIkQOTyDmVgMPZGCvixt4q
|
||||
gQ7hUVSk4KkFmtcqBVuvnzI1d/DKfZAGKdmGcfRpuAsnVhac3swP0w4Tl1BFrK9U
|
||||
vuIkz4KwXG77s5oB8rMUnyuLasLsGNpvpvXhkcQRhp6vpcCO2bS7kOTTelAPIucw
|
||||
P37qkOEdZdiWCLrr57dmhg6tmcVlmBMg6JtmfLxn2HQd9ZrCKlkWxMk5NYs6CAW5
|
||||
kgbDZUWQTAsnHeoJKbcgtPkIbxDRxNpPukFMtbA4VEWv1EkODXy9FyEKDOI/PV6K
|
||||
/80oLkgCIhCkP2mvwSFheU0RHTuZ0o0vVolP5TEOq5iufnDN4wrxqb12o//XLRc0
|
||||
RiLqGVVxhFdyKCjVxcLfII9AAp5Tse4PMh6bf6jDfB3OMvGkhMbJWhKXdR2NUTl0
|
||||
esKawMPRXIn5g3oBdNm8kyRsTTnvB567pU8uNSmA8j3jxfGCPynI8JdiwKQuW/+P
|
||||
WgLIflgxqAfG85dVVOsFmF9o5o24dDslvv9yHnHH102c6ijPCg1EobqlyFzqqxOD
|
||||
Wf2OPjIkzoTH+O27VRugnY/maIU1nshNO7ViRX5zIxEUtNMCAwEAAaNTMFEwHQYD
|
||||
VR0OBBYEFNY4gDLgPBidogkmpO8nq5yAq5g+MB8GA1UdIwQYMBaAFNY4gDLgPBid
|
||||
ogkmpO8nq5yAq5g+MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIB
|
||||
AJ0WGioNtGNg3m6ywpmxNThorQD5ZvDMlmZlDVk78E2wfNyMhwbVhKhlAnONv0wv
|
||||
kmsGjibY75nRZ+EK9PxSJ644841fryQXQ+bli5fhr7DW3uTKwaRsnzETJXRJuljq
|
||||
6+c6Zyg1/mqwnyx7YvPgVh3w496DYx/jm6Fm1IEq3BzOmn6H/gGPq3gbURzEqI3h
|
||||
P+kC2vJa8RZWrpa05Xk/Q1QUkErDX9vJghb9z3+GgirISZQzqWRghII/znv3NOE6
|
||||
zoIgaaWNFn8KPeBVpUoboH+IhpgibsnbTbI0G7AMtFq6qm3kn/4DZ2N2tuh1G2tT
|
||||
zR2Fh7hJbU7CrqxANrgnIoHG/nLSvzE24ckLb0Vj69uGQlwnZkn9fz6F7KytU+Az
|
||||
NoB2rjufaB0GQi1azdboMvdGSOxhSCAR8otWT5yDrywCqVnEvjw0oxKmuRduNe2/
|
||||
6AcG6TtK2/K+LHuhymiAwZM2qE6VD2odvb+tCzDkZOIeoIz/JcVlNpXE9FuVl250
|
||||
9NWvugeghq7tUv81iJ8ninBefJ4lUfxAehTPQqX+zXcfxgjvMRCi/ig73nLyhmjx
|
||||
r2AaraPFgrprnxUibP4L7jxdr+iiw5bWN9/B81PodrS7n5TNtnfnpZD6X6rThqOP
|
||||
xO7Tr5lAo74vNUkF2EHNaI28/RGnJPm2TIxZqy4rNH6L
|
||||
-----END CERTIFICATE-----
`

	rootCAPath := path.Join(temppath, "foo.example.com")
	err := os.WriteFile(rootCAPath, []byte(cert), 0666)
	if err != nil {
		panic(err)
	}

	certPool := x509.NewCertPool()
	ok := certPool.AppendCertsFromPEM([]byte(cert))
	assert.True(t, ok)

	testCases := []struct {
		name                    string
		scmRootCAPath           string
		insecure                bool
		validateCertInTlsConfig bool
	}{
		{
			name:                    "Insecure mode configured, SCM Root CA Path not set",
			scmRootCAPath:           "",
			insecure:                true,
			validateCertInTlsConfig: false,
		},
		{
			name:                    "SCM Root CA Path set, Insecure mode set to false",
			scmRootCAPath:           rootCAPath,
			insecure:                false,
			validateCertInTlsConfig: true,
		},
		{
			name:                    "SCM Root CA Path set, Insecure mode set to true",
			scmRootCAPath:           rootCAPath,
			insecure:                true,
			validateCertInTlsConfig: true,
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			tlsConfig := GetTlsConfig(testCase.scmRootCAPath, testCase.insecure)
			assert.Equal(t, testCase.insecure, tlsConfig.InsecureSkipVerify)
			if testCase.validateCertInTlsConfig {
				assert.NotNil(t, tlsConfig)
				assert.True(t, tlsConfig.RootCAs.Equal(certPool))
			}
		})
	}
}

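TestGetTLSConfig above pins down the expected behaviour of the new GetTlsConfig helper: the insecure flag maps straight to InsecureSkipVerify, and a non-empty root-CA path ends up in RootCAs. The following is a minimal sketch consistent with those assertions; the real implementation and its package location may differ, and the signature is taken from the test's call site.

```go
package utils

import (
	"crypto/tls"
	"crypto/x509"
	"os"

	log "github.com/sirupsen/logrus"
)

// GetTlsConfig builds a *tls.Config for SCM/PR generator HTTP clients.
// Sketch only, reconstructed from the test above.
func GetTlsConfig(scmRootCAPath string, insecure bool) *tls.Config {
	tlsConfig := &tls.Config{InsecureSkipVerify: insecure}
	if scmRootCAPath != "" {
		caCert, err := os.ReadFile(scmRootCAPath)
		if err != nil {
			log.Errorf("could not read root CA file %q: %v", scmRootCAPath, err)
			return tlsConfig
		}
		pool := x509.NewCertPool()
		if pool.AppendCertsFromPEM(caCert) {
			tlsConfig.RootCAs = pool
		}
	}
	return tlsConfig
}
```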
85 applicationset/webhook/testdata/azuredevops-pull-request.json vendored Normal file
@@ -0,0 +1,85 @@
|
||||
{
|
||||
"id": "2ab4e3d3-b7a6-425e-92b1-5a9982c1269e",
|
||||
"eventType": "git.pullrequest.created",
|
||||
"publisherId": "tfs",
|
||||
"scope": "all",
|
||||
"message": {
|
||||
"text": "Jamal Hartnett created a new pull request",
|
||||
"html": "Jamal Hartnett created a new pull request",
|
||||
"markdown": "Jamal Hartnett created a new pull request"
|
||||
},
|
||||
"detailedMessage": {
|
||||
"text": "Jamal Hartnett created a new pull request\r\n\r\n- Merge status: Succeeded\r\n- Merge commit: eef717(https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72)\r\n",
|
||||
"html": "Jamal Hartnett created a new pull request\r\n<ul>\r\n<li>Merge status: Succeeded</li>\r\n<li>Merge commit: <a href=\"https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72\">eef717</a></li>\r\n</ul>",
|
||||
"markdown": "Jamal Hartnett created a new pull request\r\n\r\n+ Merge status: Succeeded\r\n+ Merge commit: [eef717](https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72)\r\n"
|
||||
},
|
||||
"resource": {
|
||||
"repository": {
|
||||
"id": "4bc14d40-c903-45e2-872e-0462c7748079",
|
||||
"name": "Fabrikam",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079",
|
||||
"project": {
|
||||
"id": "6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c",
|
||||
"name": "DefaultCollection",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/projects/6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c",
|
||||
"state": "wellFormed"
|
||||
},
|
||||
"defaultBranch": "refs/heads/master",
|
||||
"remoteUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_git/Fabrikam"
|
||||
},
|
||||
"pullRequestId": 1,
|
||||
"status": "active",
|
||||
"createdBy": {
|
||||
"id": "54d125f7-69f7-4191-904f-c5b96b6261c8",
|
||||
"displayName": "Jamal Hartnett",
|
||||
"uniqueName": "fabrikamfiber4@hotmail.com",
|
||||
"url": "https://vssps.dev.azure.com/fabrikam/_apis/Identities/54d125f7-69f7-4191-904f-c5b96b6261c8",
|
||||
"imageUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_api/_common/identityImage?id=54d125f7-69f7-4191-904f-c5b96b6261c8"
|
||||
},
|
||||
"creationDate": "2014-06-17T16:55:46.589889Z",
|
||||
"title": "my first pull request",
|
||||
"description": " - test2\r\n",
|
||||
"sourceRefName": "refs/heads/mytopic",
|
||||
"targetRefName": "refs/heads/master",
|
||||
"mergeStatus": "succeeded",
|
||||
"mergeId": "a10bb228-6ba6-4362-abd7-49ea21333dbd",
|
||||
"lastMergeSourceCommit": {
|
||||
"commitId": "53d54ac915144006c2c9e90d2c7d3880920db49c",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/53d54ac915144006c2c9e90d2c7d3880920db49c"
|
||||
},
|
||||
"lastMergeTargetCommit": {
|
||||
"commitId": "a511f535b1ea495ee0c903badb68fbc83772c882",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/a511f535b1ea495ee0c903badb68fbc83772c882"
|
||||
},
|
||||
"lastMergeCommit": {
|
||||
"commitId": "eef717f69257a6333f221566c1c987dc94cc0d72",
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/commits/eef717f69257a6333f221566c1c987dc94cc0d72"
|
||||
},
|
||||
"reviewers": [
|
||||
{
|
||||
"reviewerUrl": null,
|
||||
"vote": 0,
|
||||
"id": "2ea2d095-48f9-4cd6-9966-62f6f574096c",
|
||||
"displayName": "[Mobile]\\Mobile Team",
|
||||
"uniqueName": "vstfs:///Classification/TeamProject/f0811a3b-8c8a-4e43-a3bf-9a049b4835bd\\Mobile Team",
|
||||
"url": "https://vssps.dev.azure.com/fabrikam/_apis/Identities/2ea2d095-48f9-4cd6-9966-62f6f574096c",
|
||||
"imageUrl": "https://dev.azure.com/fabrikam/DefaultCollection/_api/_common/identityImage?id=2ea2d095-48f9-4cd6-9966-62f6f574096c",
|
||||
"isContainer": true
|
||||
}
|
||||
],
|
||||
"url": "https://dev.azure.com/fabrikam/DefaultCollection/_apis/repos/git/repositories/4bc14d40-c903-45e2-872e-0462c7748079/pullRequests/1"
|
||||
},
|
||||
"resourceVersion": "1.0",
|
||||
"resourceContainers": {
|
||||
"collection": {
|
||||
"id": "c12d0eb8-e382-443b-9f9c-c52cba5014c2"
|
||||
},
|
||||
"account": {
|
||||
"id": "f844ec47-a9db-4511-8281-8b63f4eaf94e"
|
||||
},
|
||||
"project": {
|
||||
"id": "be9b3917-87e6-42a4-a549-2bc06a7a878f"
|
||||
}
|
||||
},
|
||||
"createdDate": "2016-09-19T13:03:27.2879096Z"
|
||||
}
|
||||
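The fixture above is what the new Azure DevOps support consumes. Here is a small, self-contained illustration of how such a payload is decoded with github.com/go-playground/webhooks/v6/azuredevops and which fields the PR generator later matches on (see getPRGeneratorInfo further down in this diff); the surrounding function is illustrative only.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-playground/webhooks/v6/azuredevops"
)

// handlePullRequest decodes an Azure DevOps pull-request service hook.
// With the fixture above, the repository name is "Fabrikam", the project
// name is "DefaultCollection", and the event type is "git.pullrequest.created".
func handlePullRequest(hook *azuredevops.Webhook, r *http.Request) {
	payload, err := hook.Parse(r,
		azuredevops.GitPullRequestCreatedEventType,
		azuredevops.GitPullRequestUpdatedEventType,
		azuredevops.GitPullRequestMergedEventType)
	if err != nil {
		fmt.Println("not an Azure DevOps pull request event:", err)
		return
	}
	if pr, ok := payload.(azuredevops.GitPullRequestEvent); ok {
		fmt.Println(pr.Resource.Repository.Project.Name, pr.Resource.Repository.Name, pr.EventType)
	}
}
```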
76 applicationset/webhook/testdata/azuredevops-push.json vendored Normal file
@@ -0,0 +1,76 @@
|
||||
{
|
||||
"id": "03c164c2-8912-4d5e-8009-3707d5f83734",
|
||||
"eventType": "git.push",
|
||||
"publisherId": "tfs",
|
||||
"scope": "all",
|
||||
"message": {
|
||||
"text": "Jamal Hartnett pushed updates to branch master of repository Fabrikam-Fiber-Git.",
|
||||
"html": "Jamal Hartnett pushed updates to branch master of repository Fabrikam-Fiber-Git.",
|
||||
"markdown": "Jamal Hartnett pushed updates to branch `master` of repository `Fabrikam-Fiber-Git`."
|
||||
},
|
||||
"detailedMessage": {
|
||||
"text": "Jamal Hartnett pushed 1 commit to branch master of repository Fabrikam-Fiber-Git.\n - Fixed bug in web.config file 33b55f7c",
|
||||
"html": "Jamal Hartnett pushed 1 commit to branch <a href=\"https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/#version=GBmaster\">master</a> of repository <a href=\"https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/\">Fabrikam-Fiber-Git</a>.\n<ul>\n<li>Fixed bug in web.config file <a href=\"https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74\">33b55f7c</a>\n</ul>",
|
||||
"markdown": "Jamal Hartnett pushed 1 commit to branch [master](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/#version=GBmaster) of repository [Fabrikam-Fiber-Git](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/).\n* Fixed bug in web.config file [33b55f7c](https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74)"
|
||||
},
|
||||
"resource": {
|
||||
"commits": [
|
||||
{
|
||||
"commitId": "33b55f7cb7e7e245323987634f960cf4a6e6bc74",
|
||||
"author": {
|
||||
"name": "Jamal Hartnett",
|
||||
"email": "fabrikamfiber4@hotmail.com",
|
||||
"date": "2015-02-25T19:01:00Z"
|
||||
},
|
||||
"committer": {
|
||||
"name": "Jamal Hartnett",
|
||||
"email": "fabrikamfiber4@hotmail.com",
|
||||
"date": "2015-02-25T19:01:00Z"
|
||||
},
|
||||
"comment": "Fixed bug in web.config file",
|
||||
"url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git/commit/33b55f7cb7e7e245323987634f960cf4a6e6bc74"
|
||||
}
|
||||
],
|
||||
"refUpdates": [
|
||||
{
|
||||
"name": "refs/heads/master",
|
||||
"oldObjectId": "aad331d8d3b131fa9ae03cf5e53965b51942618a",
|
||||
"newObjectId": "33b55f7cb7e7e245323987634f960cf4a6e6bc74"
|
||||
}
|
||||
],
|
||||
"repository": {
|
||||
"id": "278d5cd2-584d-4b63-824a-2ba458937249",
|
||||
"name": "Fabrikam-Fiber-Git",
|
||||
"url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/repos/git/repositories/278d5cd2-584d-4b63-824a-2ba458937249",
|
||||
"project": {
|
||||
"id": "6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c",
|
||||
"name": "DefaultCollection",
|
||||
"url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/projects/6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c",
|
||||
"state": "wellFormed"
|
||||
},
|
||||
"defaultBranch": "refs/heads/master",
|
||||
"remoteUrl": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git"
|
||||
},
|
||||
"pushedBy": {
|
||||
"id": "00067FFED5C7AF52@Live.com",
|
||||
"displayName": "Jamal Hartnett",
|
||||
"uniqueName": "Windows Live ID\\fabrikamfiber4@hotmail.com"
|
||||
},
|
||||
"pushId": 14,
|
||||
"date": "2014-05-02T19:17:13.3309587Z",
|
||||
"url": "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_apis/repos/git/repositories/278d5cd2-584d-4b63-824a-2ba458937249/pushes/14"
|
||||
},
|
||||
"resourceVersion": "1.0",
|
||||
"resourceContainers": {
|
||||
"collection": {
|
||||
"id": "c12d0eb8-e382-443b-9f9c-c52cba5014c2"
|
||||
},
|
||||
"account": {
|
||||
"id": "f844ec47-a9db-4511-8281-8b63f4eaf94e"
|
||||
},
|
||||
"project": {
|
||||
"id": "be9b3917-87e6-42a4-a549-2bc06a7a878f"
|
||||
}
|
||||
},
|
||||
"createdDate": "2016-09-19T13:03:27.0379153Z"
|
||||
}
|
||||
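Before the handler changes below, it may help to see the shape of the request the ApplicationSet webhook expects for the push fixture above: routing is done on the X-Vss-Activityid header, and basic-auth credentials are only checked if they are configured. The endpoint URL and credentials in this sketch are placeholders, not values taken from the diff.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"os"
)

// sendPushEvent replays the azuredevops-push.json fixture against a running
// webhook endpoint. Hypothetical URL and credentials; adjust to your setup.
func sendPushEvent() error {
	body, err := os.ReadFile("applicationset/webhook/testdata/azuredevops-push.json")
	if err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPost, "https://argocd.example.com/api/webhook", bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Vss-Activityid", "03c164c2-8912-4d5e-8009-3707d5f83734")
	req.SetBasicAuth("webhook-user", "webhook-password") // only needed when basic auth is configured
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
	return nil
}
```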
@@ -2,6 +2,7 @@ package webhook
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
@@ -19,17 +20,24 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
"github.com/go-playground/webhooks/v6/azuredevops"
|
||||
"github.com/go-playground/webhooks/v6/github"
|
||||
"github.com/go-playground/webhooks/v6/gitlab"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/go-playground/webhooks.v5/github"
|
||||
"gopkg.in/go-playground/webhooks.v5/gitlab"
|
||||
)
|
||||
|
||||
var (
|
||||
errBasicAuthVerificationFailed = errors.New("basic auth verification failed")
|
||||
)
|
||||
|
||||
type WebhookHandler struct {
|
||||
namespace string
|
||||
github *github.Webhook
|
||||
gitlab *gitlab.Webhook
|
||||
client client.Client
|
||||
generators map[string]generators.Generator
|
||||
namespace string
|
||||
github *github.Webhook
|
||||
gitlab *gitlab.Webhook
|
||||
azuredevops *azuredevops.Webhook
|
||||
azuredevopsAuthHandler func(r *http.Request) error
|
||||
client client.Client
|
||||
generators map[string]generators.Generator
|
||||
}
|
||||
|
||||
type gitGeneratorInfo struct {
|
||||
@@ -39,8 +47,14 @@ type gitGeneratorInfo struct {
|
||||
}
|
||||
|
||||
type prGeneratorInfo struct {
|
||||
Github *prGeneratorGithubInfo
|
||||
Gitlab *prGeneratorGitlabInfo
|
||||
Azuredevops *prGeneratorAzuredevopsInfo
|
||||
Github *prGeneratorGithubInfo
|
||||
Gitlab *prGeneratorGitlabInfo
|
||||
}
|
||||
|
||||
type prGeneratorAzuredevopsInfo struct {
|
||||
Repo string
|
||||
Project string
|
||||
}
|
||||
|
||||
type prGeneratorGithubInfo struct {
|
||||
@@ -68,13 +82,28 @@ func NewWebhookHandler(namespace string, argocdSettingsMgr *argosettings.Setting
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to init GitLab webhook: %v", err)
|
||||
}
|
||||
azuredevopsHandler, err := azuredevops.New()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to init Azure DevOps webhook: %v", err)
|
||||
}
|
||||
azuredevopsAuthHandler := func(r *http.Request) error {
|
||||
if argocdSettings.WebhookAzureDevOpsUsername != "" && argocdSettings.WebhookAzureDevOpsPassword != "" {
|
||||
username, password, ok := r.BasicAuth()
|
||||
if !ok || username != argocdSettings.WebhookAzureDevOpsUsername || password != argocdSettings.WebhookAzureDevOpsPassword {
|
||||
return errBasicAuthVerificationFailed
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return &WebhookHandler{
|
||||
namespace: namespace,
|
||||
github: githubHandler,
|
||||
gitlab: gitlabHandler,
|
||||
client: client,
|
||||
generators: generators,
|
||||
namespace: namespace,
|
||||
github: githubHandler,
|
||||
gitlab: gitlabHandler,
|
||||
azuredevops: azuredevopsHandler,
|
||||
azuredevopsAuthHandler: azuredevopsAuthHandler,
|
||||
client: client,
|
||||
generators: generators,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -125,6 +154,14 @@ func (h *WebhookHandler) Handler(w http.ResponseWriter, r *http.Request) {
|
||||
payload, err = h.github.Parse(r, github.PushEvent, github.PullRequestEvent, github.PingEvent)
|
||||
case r.Header.Get("X-Gitlab-Event") != "":
|
||||
payload, err = h.gitlab.Parse(r, gitlab.PushEvents, gitlab.TagEvents, gitlab.MergeRequestEvents)
|
||||
case r.Header.Get("X-Vss-Activityid") != "":
|
||||
if err = h.azuredevopsAuthHandler(r); err != nil {
|
||||
if errors.Is(err, errBasicAuthVerificationFailed) {
|
||||
log.WithField(common.SecurityField, common.SecurityHigh).Infof("Azure DevOps webhook basic auth verification failed")
|
||||
}
|
||||
} else {
|
||||
payload, err = h.azuredevops.Parse(r, azuredevops.GitPushEventType, azuredevops.GitPullRequestCreatedEventType, azuredevops.GitPullRequestUpdatedEventType, azuredevops.GitPullRequestMergedEventType)
|
||||
}
|
||||
default:
|
||||
log.Debug("Ignoring unknown webhook event")
|
||||
http.Error(w, "Unknown webhook event", http.StatusBadRequest)
|
||||
@@ -164,6 +201,12 @@ func getGitGeneratorInfo(payload interface{}) *gitGeneratorInfo {
|
||||
webURL = payload.Project.WebURL
|
||||
revision = parseRevision(payload.Ref)
|
||||
touchedHead = payload.Project.DefaultBranch == revision
|
||||
case azuredevops.GitPushEvent:
|
||||
// See: https://learn.microsoft.com/en-us/azure/devops/service-hooks/events?view=azure-devops#git.push
|
||||
webURL = payload.Resource.Repository.RemoteURL
|
||||
revision = parseRevision(payload.Resource.RefUpdates[0].Name)
|
||||
touchedHead = payload.Resource.RefUpdates[0].Name == payload.Resource.Repository.DefaultBranch
|
||||
// unfortunately, Azure DevOps doesn't provide a list of changed files
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -229,6 +272,18 @@ func getPRGeneratorInfo(payload interface{}) *prGeneratorInfo {
|
||||
Project: strconv.FormatInt(payload.ObjectAttributes.TargetProjectID, 10),
|
||||
APIHostname: urlObj.Hostname(),
|
||||
}
|
||||
case azuredevops.GitPullRequestEvent:
|
||||
if !isAllowedAzureDevOpsPullRequestAction(string(payload.EventType)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
repo := payload.Resource.Repository.Name
|
||||
project := payload.Resource.Repository.Project.Name
|
||||
|
||||
info.Azuredevops = &prGeneratorAzuredevopsInfo{
|
||||
Repo: repo,
|
||||
Project: project,
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -256,6 +311,13 @@ var gitlabAllowedPullRequestActions = []string{
|
||||
"merge",
|
||||
}
|
||||
|
||||
// azuredevopsAllowedPullRequestActions is a list of Azure DevOps actions that allow refresh
|
||||
var azuredevopsAllowedPullRequestActions = []string{
|
||||
"git.pullrequest.created",
|
||||
"git.pullrequest.merged",
|
||||
"git.pullrequest.updated",
|
||||
}
|
||||
|
||||
func isAllowedGithubPullRequestAction(action string) bool {
|
||||
for _, allow := range githubAllowedPullRequestActions {
|
||||
if allow == action {
|
||||
@@ -274,6 +336,15 @@ func isAllowedGitlabPullRequestAction(action string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func isAllowedAzureDevOpsPullRequestAction(action string) bool {
|
||||
for _, allow := range azuredevopsAllowedPullRequestActions {
|
||||
if allow == action {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
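The three isAllowed*PullRequestAction helpers differ only in the allow-list they scan. If the module targets Go 1.21+, the same check collapses to a one-liner with the standard library; this is a sketch of an alternative, not a change the diff itself makes.

```go
package webhook

import "slices"

// Equivalent to the loop above, using the standard library's slices package.
func isAllowedAzureDevOpsPullRequestActionAlt(action string) bool {
	return slices.Contains(azuredevopsAllowedPullRequestActions, action)
}
```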
func shouldRefreshGitGenerator(gen *v1alpha1.GitGenerator, info *gitGeneratorInfo) bool {
|
||||
if gen == nil || info == nil {
|
||||
return false
|
||||
@@ -359,6 +430,16 @@ func shouldRefreshPRGenerator(gen *v1alpha1.PullRequestGenerator, info *prGenera
|
||||
return true
|
||||
}
|
||||
|
||||
if gen.AzureDevOps != nil && info.Azuredevops != nil {
|
||||
if gen.AzureDevOps.Project != info.Azuredevops.Project {
|
||||
return false
|
||||
}
|
||||
if gen.AzureDevOps.Repo != info.Azuredevops.Repo {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -562,7 +643,7 @@ func refreshApplicationSet(c client.Client, appSet *v1alpha1.ApplicationSet) err
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
err := c.Get(context.Background(), types.NamespacedName{Name: appSet.Name, Namespace: appSet.Namespace}, appSet)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error getting ApplicationSet: %w", err)
|
||||
}
|
||||
if appSet.Annotations == nil {
|
||||
appSet.Annotations = map[string]string{}
|
||||
|
||||
@@ -20,12 +20,13 @@ import (
|
||||
kubefake "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/generators"
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/services/scm_provider"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
)
|
||||
|
||||
type generatorMock struct {
|
||||
@@ -145,6 +146,24 @@ func TestWebhookHandler(t *testing.T) {
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedRefresh: false,
|
||||
},
|
||||
{
|
||||
desc: "WebHook from a Azure DevOps repository via Commit",
|
||||
headerKey: "X-Vss-Activityid",
|
||||
headerValue: "Push Hook",
|
||||
payloadFile: "azuredevops-push.json",
|
||||
effectedAppSets: []string{"git-azure-devops", "plugin", "matrix-pull-request-github-plugin"},
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedRefresh: true,
|
||||
},
|
||||
{
|
||||
desc: "WebHook from a Azure DevOps repository via pull request event",
|
||||
headerKey: "X-Vss-Activityid",
|
||||
headerValue: "Pull Request Hook",
|
||||
payloadFile: "azuredevops-pull-request.json",
|
||||
effectedAppSets: []string{"pull-request-azure-devops", "plugin", "matrix-pull-request-github-plugin"},
|
||||
expectedStatusCode: http.StatusOK,
|
||||
expectedRefresh: true,
|
||||
},
|
||||
}
|
||||
|
||||
namespace := "test"
|
||||
@@ -160,8 +179,10 @@ func TestWebhookHandler(t *testing.T) {
|
||||
fc := fake.NewClientBuilder().WithScheme(scheme).WithObjects(
|
||||
fakeAppWithGitGenerator("git-github", namespace, "https://github.com/org/repo"),
|
||||
fakeAppWithGitGenerator("git-gitlab", namespace, "https://gitlab/group/name"),
|
||||
fakeAppWithGitGenerator("git-azure-devops", namespace, "https://dev.azure.com/fabrikam-fiber-inc/DefaultCollection/_git/Fabrikam-Fiber-Git"),
|
||||
fakeAppWithGithubPullRequestGenerator("pull-request-github", namespace, "Codertocat", "Hello-World"),
|
||||
fakeAppWithGitlabPullRequestGenerator("pull-request-gitlab", namespace, "100500"),
|
||||
fakeAppWithAzureDevOpsPullRequestGenerator("pull-request-azure-devops", namespace, "DefaultCollection", "Fabrikam"),
|
||||
fakeAppWithPluginGenerator("plugin", namespace),
|
||||
fakeAppWithMatrixAndGitGenerator("matrix-git-github", namespace, "https://github.com/org/repo"),
|
||||
fakeAppWithMatrixAndPullRequestGenerator("matrix-pull-request-github", namespace, "Codertocat", "Hello-World"),
|
||||
@@ -337,6 +358,27 @@ func fakeAppWithGithubPullRequestGenerator(name, namespace, owner, repo string)
|
||||
}
|
||||
}
|
||||
|
||||
func fakeAppWithAzureDevOpsPullRequestGenerator(name, namespace, project, repo string) *v1alpha1.ApplicationSet {
|
||||
return &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
PullRequest: &v1alpha1.PullRequestGenerator{
|
||||
AzureDevOps: &v1alpha1.PullRequestGeneratorAzureDevOps{
|
||||
Project: project,
|
||||
Repo: repo,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func fakeAppWithMatrixAndGitGenerator(name, namespace, repo string) *v1alpha1.ApplicationSet {
|
||||
return &v1alpha1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
||||
@@ -3,5 +3,6 @@ package assets
|
||||
import "embed"
|
||||
|
||||
// Embedded contains embedded assets
|
||||
//
|
||||
//go:embed *
|
||||
var Embedded embed.FS
|
||||
|
||||
@@ -401,6 +401,11 @@
|
||||
"type": "boolean",
|
||||
"name": "validate",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -462,6 +467,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -523,6 +533,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -649,6 +664,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -737,6 +757,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -773,6 +798,11 @@
|
||||
"type": "string",
|
||||
"name": "namespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -885,6 +915,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -935,6 +970,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -971,6 +1011,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1084,6 +1129,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1154,6 +1204,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1226,6 +1281,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1295,6 +1355,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1356,6 +1421,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1423,6 +1493,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1484,6 +1559,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1529,6 +1609,11 @@
|
||||
"description": "the application's namespace.",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1574,6 +1659,11 @@
|
||||
"description": "the application's namespace.",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1662,6 +1752,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1737,6 +1832,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -3833,6 +3933,11 @@
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -3931,7 +4036,7 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"expiresIn": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "expiresIn represents a duration in seconds"
|
||||
},
|
||||
@@ -3958,14 +4063,14 @@
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"expiresAt": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"issuedAt": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
@@ -3998,6 +4103,9 @@
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"project": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -4027,6 +4135,9 @@
|
||||
},
|
||||
"patchType": {
|
||||
"type": "string"
|
||||
},
|
||||
"project": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -4051,12 +4162,15 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"project": {
|
||||
"type": "string"
|
||||
},
|
||||
"prune": {
|
||||
"type": "boolean"
|
||||
}
|
||||
@@ -4087,6 +4201,9 @@
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"project": {
|
||||
"type": "string"
|
||||
},
|
||||
"prune": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -4509,7 +4626,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
@@ -4648,7 +4765,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"expiresIn": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "expiresIn represents a duration in seconds"
|
||||
},
|
||||
@@ -5239,7 +5356,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"remainingItemCount": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "remainingItemCount is the number of subsequent items in the list which are not included in this\nlist response. If the list request contained label or field selectors, then the number of\nremaining items is unknown and the field will be left unset and omitted during serialization.\nIf the list is complete (either because it is not chunking or because this is the last chunk),\nthen there are no more remaining items and this field will be left unset and omitted during\nserialization.\nServers older than v1.15 do not set this field.\nThe intended use of the remainingItemCount is *estimating* the size of a collection. Clients\nshould not rely on the remainingItemCount to be set or to be exact.\n+optional"
|
||||
},
|
||||
@@ -5317,7 +5434,7 @@
|
||||
},
|
||||
"seconds": {
|
||||
"description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.",
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
@@ -5387,7 +5504,7 @@
|
||||
"$ref": "#/definitions/v1Time"
|
||||
},
|
||||
"deletionGracePeriodSeconds": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional"
|
||||
},
|
||||
@@ -5406,7 +5523,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"generation": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "A sequence number representing a specific generation of the desired state.\nPopulated by the system. Read-only.\n+optional"
|
||||
},
|
||||
@@ -5537,19 +5654,8 @@
|
||||
},
|
||||
"v1Time": {
|
||||
"description": "Time is a wrapper around time.Time which supports correct\nmarshaling to YAML and JSON. Wrappers are provided for many\nof the factory methods that the time package offers.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"nanos": {
|
||||
"description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.",
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"seconds": {
|
||||
"description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.",
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
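The v1Time change above replaces the {seconds, nanos} object with a plain RFC 3339 string, which matches how metav1.Time actually marshals to JSON. A quick check of what API clients now receive:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	t := metav1.NewTime(time.Date(2016, 9, 19, 13, 3, 27, 0, time.UTC))
	out, _ := json.Marshal(t)
	fmt.Println(string(out)) // "2016-09-19T13:03:27Z"
}
```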
"v1alpha1AWSAuthConfig": {
|
||||
"type": "object",
|
||||
@@ -5735,16 +5841,16 @@
|
||||
"title": "ApplicationDestination holds information about the application's destination",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Name is an alternate way of specifying the target cluster by its symbolic name"
|
||||
"description": "Name is an alternate way of specifying the target cluster by its symbolic name. This must be set if Server is not set.",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"type": "string",
|
||||
"title": "Namespace specifies the target namespace for the application's resources.\nThe namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace"
|
||||
},
|
||||
"server": {
|
||||
"type": "string",
|
||||
"title": "Server specifies the URL of the target cluster and must be set to the Kubernetes control plane API"
|
||||
"description": "Server specifies the URL of the target cluster's Kubernetes control plane API. This must be set if Name is not set.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -5788,6 +5894,12 @@
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"labels": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -5943,6 +6055,30 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetResourceIgnoreDifferences": {
|
||||
"description": "ApplicationSetResourceIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live\napplications when applying changes from generated applications.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"jqPathExpressions": {
|
||||
"description": "JQPathExpressions is a list of JQ path expressions to fields to ignore differences for.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"jsonPointers": {
|
||||
"description": "JSONPointers is a list of JSON pointers to fields to ignore differences for.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "Name is the name of the application to ignore differences for. If not specified, the rule applies to all applications.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
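The schema above backs the new ignoreApplicationDifferences list on the ApplicationSet spec (see the spec change a few hunks below). As a rough sketch, a single rule of this shape in Go looks like the following; the type and field names follow the schema, while the example values are illustrative only.

```go
package main

import "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"

// exampleRule ignores targetRevision differences (JSON pointer) and syncPolicy
// differences (JQ path) for every generated Application, since Name is omitted.
func exampleRule() v1alpha1.ApplicationSetResourceIgnoreDifferences {
	return v1alpha1.ApplicationSetResourceIgnoreDifferences{
		JSONPointers:      []string{"/spec/source/targetRevision"},
		JQPathExpressions: []string{".spec.syncPolicy"},
	}
}
```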
"v1alpha1ApplicationSetRolloutStep": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -5991,6 +6127,12 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"ignoreApplicationDifferences": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetResourceIgnoreDifferences"
|
||||
}
|
||||
},
|
||||
"preservedFields": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationPreservedFields"
|
||||
},
|
||||
@@ -6281,6 +6423,13 @@
|
||||
"type": "string",
|
||||
"title": "Namespace sets the namespace that Kustomize adds to all resources"
|
||||
},
|
||||
"patches": {
|
||||
"type": "array",
|
||||
"title": "Patches is a list of Kustomize patches",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizePatch"
|
||||
}
|
||||
},
|
||||
"replicas": {
|
||||
"type": "array",
|
||||
"title": "Replicas is a list of Kustomize Replicas override specifications",
|
||||
@@ -6369,7 +6518,7 @@
|
||||
},
|
||||
"revisionHistoryLimit": {
|
||||
"description": "RevisionHistoryLimit limits the number of items kept in the application's revision history, which is used for informational purposes as well as for rollbacks to previous versions.\nThis should only be changed in exceptional circumstances.\nSetting to zero will store no history. This will reduce storage used.\nIncreasing will increase the space used to store the history, so we do not recommend increasing it.\nDefault is 10.",
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"source": {
|
||||
@@ -6519,7 +6668,7 @@
|
||||
"title": "Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. \"2m\", \"1h\")"
|
||||
},
|
||||
"factor": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "Factor is a factor to multiply the base duration after each failed retry"
|
||||
},
|
||||
@@ -6630,7 +6779,7 @@
|
||||
},
|
||||
"shard": {
|
||||
"description": "Shard contains optional shard number. Calculated on the fly by the application controller if not specified.",
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
@@ -6640,7 +6789,7 @@
|
||||
"title": "ClusterCacheInfo contains information about the cluster cache",
|
||||
"properties": {
|
||||
"apisCount": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "APIsCount holds number of observed Kubernetes API count"
|
||||
},
|
||||
@@ -6648,7 +6797,7 @@
|
||||
"$ref": "#/definitions/v1Time"
|
||||
},
|
||||
"resourcesCount": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "ResourcesCount holds number of observed Kubernetes resources"
|
||||
}
|
||||
@@ -6711,7 +6860,7 @@
|
||||
}
|
||||
},
|
||||
"applicationsCount": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "ApplicationsCount is the number of applications managed by Argo CD on the cluster"
|
||||
},
|
||||
@@ -6767,6 +6916,13 @@
|
||||
"destination": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationDestination"
|
||||
},
|
||||
"ignoreDifferences": {
|
||||
"type": "array",
|
||||
"title": "IgnoreDifferences is a reference to the application's ignored differences used for comparison",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ResourceIgnoreDifferences"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
@@ -6829,7 +6985,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"requeueAfterSeconds": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"template": {
|
||||
@@ -6917,7 +7073,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"requeueAfterSeconds": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"revision": {
|
||||
@@ -7049,15 +7205,15 @@
|
||||
"title": "TODO: describe this type",
|
||||
"properties": {
|
||||
"capacity": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"requestedByApp": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"requestedByNeighbors": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"resourceName": {
|
||||
@@ -7095,11 +7251,11 @@
|
||||
"title": "JWTToken holds the issuedAt and expiresAt values of a token",
|
||||
"properties": {
|
||||
"exp": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"iat": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"id": {
|
||||
@@ -7146,6 +7302,20 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeGvk": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"group": {
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeOptions": {
|
||||
"type": "object",
|
||||
"title": "KustomizeOptions are options for kustomize to use when building manifests",
|
||||
@@ -7160,6 +7330,26 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizePatch": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"options": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"patch": {
|
||||
"type": "string"
|
||||
},
|
||||
"path": {
|
||||
"type": "string"
|
||||
},
|
||||
"target": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeSelector"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeReplica": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -7172,6 +7362,34 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeResId": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"gvk": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeGvk"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1KustomizeSelector": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotationSelector": {
|
||||
"type": "string"
|
||||
},
|
||||
"labelSelector": {
|
||||
"type": "string"
|
||||
},
|
||||
"resId": {
|
||||
"$ref": "#/definitions/v1alpha1KustomizeResId"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ListGenerator": {
|
||||
"type": "object",
|
||||
"title": "ListGenerator include items info",
|
||||
@@ -7298,7 +7516,7 @@
|
||||
"title": "Phase is the current phase of the operation"
|
||||
},
|
||||
"retryCount": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "RetryCount contains time of operation retries"
|
||||
},
|
||||
@@ -7390,7 +7608,7 @@
|
||||
},
|
||||
"requeueAfterSeconds": {
|
||||
"description": "RequeueAfterSeconds determines how long the ApplicationSet controller will wait before reconciling the ApplicationSet again.",
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"template": {
|
||||
@@ -7483,7 +7701,7 @@
|
||||
},
|
||||
"requeueAfterSeconds": {
|
||||
"description": "Standard parameters.",
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"template": {
|
||||
@@ -7588,6 +7806,10 @@
|
||||
"description": "The GitLab API URL to talk to. If blank, uses https://gitlab.com/.",
|
||||
"type": "string"
|
||||
},
|
||||
"insecure": {
|
||||
"type": "boolean",
|
||||
"title": "Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false"
|
||||
},
|
||||
"labels": {
|
||||
"type": "array",
|
||||
"title": "Labels is used to filter the MRs that you want to target",
|
||||
@@ -7686,12 +7908,12 @@
|
||||
"title": "GithubAppEnterpriseBaseURL specifies the GitHub API URL for GitHub app authentication. If empty will default to https://api.github.com"
|
||||
},
|
||||
"githubAppID": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "GithubAppId specifies the Github App ID of the app used to access the repo for GitHub app authentication"
|
||||
},
|
||||
"githubAppInstallationID": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "GithubAppInstallationId specifies the ID of the installed GitHub App for GitHub app authentication"
|
||||
},
|
||||
@@ -7776,12 +7998,12 @@
|
||||
"title": "GithubAppEnterpriseBaseURL specifies the base URL of GitHub Enterprise installation. If empty will default to https://api.github.com"
|
||||
},
|
||||
"githubAppID": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "GithubAppId specifies the ID of the GitHub app used to access the repo"
|
||||
},
|
||||
"githubAppInstallationID": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "GithubAppInstallationId specifies the installation ID of the GitHub App used to access the repo"
|
||||
},
|
||||
@@ -7908,6 +8130,12 @@
|
||||
"disabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"displayName": {
|
||||
"type": "string"
|
||||
},
|
||||
"iconClass": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -8089,13 +8317,15 @@
|
||||
"$ref": "#/definitions/v1alpha1ResourceRef"
|
||||
}
|
||||
},
|
||||
"resourceRef": {
|
||||
"$ref": "#/definitions/v1alpha1ResourceRef"
|
||||
},
|
||||
"resourceVersion": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/v1alpha1ResourceRef"
|
||||
}
|
||||
]
|
||||
},
|
||||
"v1alpha1ResourceOverride": {
|
||||
"type": "object",
|
||||
@@ -8223,7 +8453,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"syncWave": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"version": {
|
||||
@@ -8240,7 +8470,7 @@
|
||||
},
|
||||
"limit": {
|
||||
"description": "Limit is the maximum number of attempts for retrying a failed sync. If set to 0, no retries will be performed.",
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
@@ -8256,7 +8486,7 @@
|
||||
"$ref": "#/definitions/v1Time"
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64",
|
||||
"title": "ID is an auto incrementing identifier of the RevisionHistory"
|
||||
},
|
||||
@@ -8349,7 +8579,7 @@
|
||||
},
|
||||
"requeueAfterSeconds": {
|
||||
"description": "Standard parameters.",
|
||||
"type": "string",
|
||||
"type": "integer",
|
||||
"format": "int64"
|
||||
},
|
||||
"template": {
|
||||
@@ -8554,12 +8784,24 @@
|
||||
"description": "Gitlab group to scan. Required. You can use either the project id (recommended) or the full namespaced path.",
|
||||
"type": "string"
|
||||
},
|
||||
"includeSharedProjects": {
|
||||
"type": "boolean",
|
||||
"title": "When recursing through subgroups, also include shared Projects (true) or scan only the subgroups under same path (false). Defaults to \"true\""
|
||||
},
|
||||
"includeSubgroups": {
|
||||
"type": "boolean",
|
||||
"title": "Recurse through subgroups (true) or scan only the base group (false). Defaults to \"false\""
|
||||
},
|
||||
"insecure": {
|
||||
"type": "boolean",
|
||||
"title": "Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false"
|
||||
},
|
||||
"tokenRef": {
|
||||
"$ref": "#/definitions/v1alpha1SecretRef"
|
||||
},
|
||||
"topic": {
|
||||
"description": "Filter repos list based on Gitlab Topic.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -30,11 +30,13 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
"github.com/argoproj/argo-cd/v2/util/tls"
|
||||
"github.com/argoproj/argo-cd/v2/util/trace"
|
||||
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// CLIName is the name of the CLI
|
||||
cliName = "argocd-application-controller"
|
||||
cliName = common.ApplicationController
|
||||
// Default time in seconds for application resync period
|
||||
defaultAppResyncPeriod = 180
|
||||
// Default time in seconds for application hard resync period
|
||||
@@ -43,27 +45,29 @@ const (
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
appResyncPeriod int64
|
||||
appHardResyncPeriod int64
|
||||
repoServerAddress string
|
||||
repoServerTimeoutSeconds int
|
||||
selfHealTimeoutSeconds int
|
||||
statusProcessors int
|
||||
operationProcessors int
|
||||
glogLevel int
|
||||
metricsPort int
|
||||
metricsCacheExpiration time.Duration
|
||||
metricsAplicationLabels []string
|
||||
kubectlParallelismLimit int64
|
||||
cacheSrc func() (*appstatecache.Cache, error)
|
||||
redisClient *redis.Client
|
||||
repoServerPlaintext bool
|
||||
repoServerStrictTLS bool
|
||||
otlpAddress string
|
||||
applicationNamespaces []string
|
||||
persistResourceHealth bool
|
||||
shardingAlgorithm string
|
||||
clientConfig clientcmd.ClientConfig
|
||||
appResyncPeriod int64
|
||||
appHardResyncPeriod int64
|
||||
repoServerAddress string
|
||||
repoServerTimeoutSeconds int
|
||||
selfHealTimeoutSeconds int
|
||||
statusProcessors int
|
||||
operationProcessors int
|
||||
glogLevel int
|
||||
metricsPort int
|
||||
metricsCacheExpiration time.Duration
|
||||
metricsAplicationLabels []string
|
||||
kubectlParallelismLimit int64
|
||||
cacheSource func() (*appstatecache.Cache, error)
|
||||
redisClient *redis.Client
|
||||
repoServerPlaintext bool
|
||||
repoServerStrictTLS bool
|
||||
otlpAddress string
|
||||
otlpAttrs []string
|
||||
applicationNamespaces []string
|
||||
persistResourceHealth bool
|
||||
shardingAlgorithm string
|
||||
enableDynamicClusterDistribution bool
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -91,7 +95,7 @@ func NewCommand() *cobra.Command {
|
||||
config, err := clientConfig.ClientConfig()
|
||||
errors.CheckError(err)
|
||||
errors.CheckError(v1alpha1.SetK8SConfigDefaults(config))
|
||||
config.UserAgent = fmt.Sprintf("argocd-application-controller/%s (%s)", vers.Version, vers.Platform)
|
||||
config.UserAgent = fmt.Sprintf("%s/%s (%s)", common.DefaultApplicationControllerName, vers.Version, vers.Platform)
|
||||
|
||||
kubeClient := kubernetes.NewForConfigOrDie(config)
|
||||
appClient := appclientset.NewForConfigOrDie(config)
|
||||
@@ -126,7 +130,7 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
repoClientset := apiclient.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds, tlsConfig)
|
||||
|
||||
cache, err := cacheSrc()
|
||||
cache, err := cacheSource()
|
||||
errors.CheckError(err)
|
||||
cache.Cache.SetClient(cacheutil.NewTwoLevelClient(cache.Cache.GetClient(), 10*time.Minute))
|
||||
|
||||
@@ -136,7 +140,8 @@ func NewCommand() *cobra.Command {
|
||||
appController.InvalidateProjectsCache()
|
||||
}))
|
||||
kubectl := kubeutil.NewKubectl()
|
||||
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm)
|
||||
clusterFilter := getClusterFilter(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
|
||||
errors.CheckError(err)
|
||||
appController, err = controller.NewApplicationController(
|
||||
namespace,
|
||||
settingsMgr,
|
||||
@@ -164,7 +169,7 @@ func NewCommand() *cobra.Command {
|
||||
stats.RegisterHeapDumper("memprofile")
|
||||
|
||||
if otlpAddress != "" {
|
||||
closeTracer, err := trace.InitTracer(ctx, "argocd-controller", otlpAddress)
|
||||
closeTracer, err := trace.InitTracer(ctx, "argocd-controller", otlpAddress, otlpAttrs)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize tracing: %v", err)
|
||||
}
|
||||
@@ -196,30 +201,66 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
|
||||
command.Flags().StringSliceVar(&metricsAplicationLabels, "metrics-application-labels", []string{}, "List of Application labels that will be added to the argocd_application_labels metric")
|
||||
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
|
||||
command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_APPLICATION_CONTROLLER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)")
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that applications are allowed to be reconciled from")
|
||||
command.Flags().BoolVar(&persistResourceHealth, "persist-resource-health", env.ParseBoolFromEnv("ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH", true), "Enables storing the managed resources health in the Application CRD")
|
||||
command.Flags().StringVar(&shardingAlgorithm, "sharding-method", env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm), "Enables choice of sharding method. Supported sharding methods are : [legacy, round-robin] ")
|
||||
cacheSrc = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
command.Flags().BoolVar(&enableDynamicClusterDistribution, "dynamic-cluster-distribution-enabled", env.ParseBoolFromEnv(common.EnvEnableDynamicClusterDistribution, false), "Enables dynamic cluster distribution.")
|
||||
cacheSource = appstatecache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
})
|
||||
return &command
|
||||
}
|
||||
|
||||
func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string) sharding.ClusterFilterFunction {
|
||||
replicas := env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
func getClusterFilter(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterFilterFunction {
|
||||
|
||||
var replicas int
|
||||
shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
|
||||
|
||||
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
|
||||
appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})
|
||||
|
||||
// if the application controller deployment was not found, the Get() call returns an empty Deployment object. So, set the variable to nil explicitly
|
||||
if err != nil && kubeerrors.IsNotFound(err) {
|
||||
appControllerDeployment = nil
|
||||
}
|
||||
|
||||
if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
|
||||
replicas = int(*appControllerDeployment.Spec.Replicas)
|
||||
} else {
|
||||
replicas = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
|
||||
}
|
||||
|
||||
var clusterFilter func(cluster *v1alpha1.Cluster) bool
|
||||
if replicas > 1 {
|
||||
if shard < 0 {
|
||||
// check for shard mapping using configmap if application-controller is a deployment
|
||||
// else use existing logic to infer shard from pod name if application-controller is a statefulset
|
||||
if enableDynamicClusterDistribution && appControllerDeployment != nil {
|
||||
|
||||
var err error
|
||||
shard, err = sharding.InferShard()
|
||||
// retry 3 times if we find a conflict while updating shard mapping configMap.
|
||||
// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
|
||||
for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
|
||||
shard, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicas, shard)
|
||||
if !kubeerrors.IsConflict(err) {
|
||||
err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
|
||||
break
|
||||
}
|
||||
log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
} else {
|
||||
if shard < 0 {
|
||||
var err error
|
||||
shard, err = sharding.InferShard()
|
||||
errors.CheckError(err)
|
||||
}
|
||||
}
|
||||
log.Infof("Processing clusters from shard %d", shard)
|
||||
db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
|
||||
log.Infof("Using filter function: %s", shardingAlgorithm)
|
||||
distributionFunction := sharding.GetDistributionFunction(db, shardingAlgorithm)
|
||||
clusterFilter = sharding.GetClusterFilter(distributionFunction, shard)
|
||||
clusterFilter = sharding.GetClusterFilter(db, distributionFunction, shard)
|
||||
} else {
|
||||
log.Info("Processing all cluster shards")
|
||||
}
|
||||
|
||||
@@ -40,10 +40,7 @@ import (
|
||||
argosettings "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
)
|
||||
|
||||
// TODO: load this using Cobra.
|
||||
func getSubmoduleEnabled() bool {
|
||||
return env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true)
|
||||
}
|
||||
var gitSubmoduleEnabled = env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true)
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
@@ -64,6 +61,10 @@ func NewCommand() *cobra.Command {
|
||||
repoServerStrictTLS bool
|
||||
repoServerTimeoutSeconds int
|
||||
maxConcurrentReconciliations int
|
||||
scmRootCAPath string
|
||||
allowedScmProviders []string
|
||||
globalPreservedAnnotations []string
|
||||
globalPreservedLabels []string
|
||||
)
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
@@ -96,7 +97,7 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
policyObj, exists := utils.Policies[policy]
|
||||
if !exists {
|
||||
log.Info("Policy value can be: sync, create-only, create-update, create-delete, default value: sync")
|
||||
log.Error("Policy value can be: sync, create-only, create-update, create-delete, default value: sync")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@@ -106,6 +107,9 @@ func NewCommand() *cobra.Command {
|
||||
// If the applicationset-namespaces contains only one namespace it corresponds to the current namespace
|
||||
if len(applicationSetNamespaces) == 1 {
|
||||
watchedNamespace = (applicationSetNamespaces)[0]
|
||||
} else if len(allowedScmProviders) == 0 {
|
||||
log.Error("When enabling applicationset in any namespace using applicationset-namespaces, allowed-scm-providers is required")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
||||
@@ -151,16 +155,16 @@ func NewCommand() *cobra.Command {
|
||||
}
|
||||
|
||||
repoClientset := apiclient.NewRepoServerClientset(argocdRepoServer, repoServerTimeoutSeconds, tlsConfig)
|
||||
argoCDService, err := services.NewArgoCDService(argoCDDB, getSubmoduleEnabled(), repoClientset, enableNewGitFileGlobbing)
|
||||
argoCDService, err := services.NewArgoCDService(argoCDDB, gitSubmoduleEnabled, repoClientset, enableNewGitFileGlobbing)
|
||||
errors.CheckError(err)
|
||||
|
||||
terminalGenerators := map[string]generators.Generator{
|
||||
"List": generators.NewListGenerator(),
|
||||
"Clusters": generators.NewClusterGenerator(mgr.GetClient(), ctx, k8sClient, namespace),
|
||||
"Git": generators.NewGitGenerator(argoCDService),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(mgr.GetClient(), scmAuth),
|
||||
"SCMProvider": generators.NewSCMProviderGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders),
|
||||
"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, dynamicClient, k8sClient, namespace),
|
||||
"PullRequest": generators.NewPullRequestGenerator(mgr.GetClient(), scmAuth),
|
||||
"PullRequest": generators.NewPullRequestGenerator(mgr.GetClient(), scmAuth, scmRootCAPath, allowedScmProviders),
|
||||
"Plugin": generators.NewPluginGenerator(mgr.GetClient(), ctx, k8sClient, namespace),
|
||||
}
|
||||
|
||||
@@ -198,19 +202,23 @@ func NewCommand() *cobra.Command {
|
||||
}
|
||||
|
||||
if err = (&controllers.ApplicationSetReconciler{
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
|
||||
Renderer: &utils.Render{},
|
||||
Policy: policyObj,
|
||||
EnablePolicyOverride: enablePolicyOverride,
|
||||
ArgoAppClientset: appSetConfig,
|
||||
KubeClientset: k8sClient,
|
||||
ArgoDB: argoCDDB,
|
||||
ArgoCDNamespace: namespace,
|
||||
ApplicationSetNamespaces: applicationSetNamespaces,
|
||||
EnableProgressiveSyncs: enableProgressiveSyncs,
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
|
||||
Renderer: &utils.Render{},
|
||||
Policy: policyObj,
|
||||
EnablePolicyOverride: enablePolicyOverride,
|
||||
ArgoAppClientset: appSetConfig,
|
||||
KubeClientset: k8sClient,
|
||||
ArgoDB: argoCDDB,
|
||||
ArgoCDNamespace: namespace,
|
||||
ApplicationSetNamespaces: applicationSetNamespaces,
|
||||
EnableProgressiveSyncs: enableProgressiveSyncs,
|
||||
SCMRootCAPath: scmRootCAPath,
|
||||
GlobalPreservedAnnotations: globalPreservedAnnotations,
|
||||
GlobalPreservedLabels: globalPreservedLabels,
|
||||
Cache: mgr.GetCache(),
|
||||
}).SetupWithManager(mgr, enableProgressiveSyncs, maxConcurrentReconciliations); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
|
||||
os.Exit(1)
|
||||
@@ -239,6 +247,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&debugLog, "debug", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DEBUG", false), "Print debug logs. Takes precedence over loglevel")
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringSliceVar(&allowedScmProviders, "allowed-scm-providers", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ALLOWED_SCM_PROVIDERS", []string{}, ","), "The list of allowed scm providers. (Default: Empty = all)")
|
||||
command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode")
|
||||
command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.")
|
||||
command.Flags().BoolVar(&enableNewGitFileGlobbing, "enable-new-git-file-globbing", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_NEW_GIT_FILE_GLOBBING", false), "Enable new globbing in Git files generator.")
|
||||
@@ -246,6 +255,9 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&repoServerStrictTLS, "repo-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_STRICT_TLS", false), "Whether to use strict validation of the TLS cert presented by the repo server")
|
||||
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
|
||||
command.Flags().IntVar(&maxConcurrentReconciliations, "concurrent-reconciliations", env.ParseNumFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_CONCURRENT_RECONCILIATIONS", 10, 1, 100), "Max concurrent reconciliations limit for the controller")
|
||||
command.Flags().StringVar(&scmRootCAPath, "scm-root-ca-path", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_SCM_ROOT_CA_PATH", ""), "Provide Root CA Path for self-signed TLS Certificates")
|
||||
command.Flags().StringSliceVar(&globalPreservedAnnotations, "preserved-annotations", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_ANNOTATIONS", []string{}, ","), "Sets global preserved field values for annotations")
|
||||
command.Flags().StringSliceVar(&globalPreservedLabels, "preserved-labels", env.StringsFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_GLOBAL_PRESERVED_LABELS", []string{}, ","), "Sets global preserved field values for labels")
|
||||
return &command
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ func NewCommand() *cobra.Command {
|
||||
var (
|
||||
configFilePath string
|
||||
otlpAddress string
|
||||
otlpAttrs []string
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -55,7 +56,7 @@ func NewCommand() *cobra.Command {
|
||||
if otlpAddress != "" {
|
||||
var closer func()
|
||||
var err error
|
||||
closer, err = traceutil.InitTracer(ctx, "argocd-cmp-server", otlpAddress)
|
||||
closer, err = traceutil.InitTracer(ctx, "argocd-cmp-server", otlpAddress, otlpAttrs)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize tracing: %v", err)
|
||||
}
|
||||
@@ -82,5 +83,6 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringVar(&configFilePath, "config-dir-path", common.DefaultPluginConfigFilePath, "Config management plugin configuration file location, Default is '/home/argocd/cmp-server/config/'")
|
||||
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_CMP_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
|
||||
command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_CMP_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)")
|
||||
return &command
|
||||
}
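`--otlp-attrs` takes colon-separated `key:value` pairs that are passed through to the tracer. The actual `traceutil.InitTracer` implementation is not part of this diff; a minimal sketch of how such strings might be turned into OpenTelemetry attributes, assuming the `go.opentelemetry.io/otel/attribute` package:

```go
package example

import (
	"strings"

	"go.opentelemetry.io/otel/attribute"
)

// parseOtlpAttrs converts "key:value" strings into OpenTelemetry attributes.
// Entries without a colon are skipped. Illustrative helper only.
func parseOtlpAttrs(attrs []string) []attribute.KeyValue {
	out := make([]attribute.KeyValue, 0, len(attrs))
	for _, a := range attrs {
		key, value, found := strings.Cut(a, ":")
		if !found || key == "" {
			continue
		}
		out = append(out, attribute.String(key, value))
	}
	return out
}
```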
|
||||
|
||||
@@ -20,6 +20,7 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
command.AddCommand(newAWSCommand())
|
||||
command.AddCommand(newGCPCommand())
|
||||
command.AddCommand(newAzureCommand())
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
cmd/argocd-k8s-auth/commands/azure.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package commands

import (
	"os"

	"github.com/Azure/kubelogin/pkg/token"
	"github.com/spf13/cobra"

	"github.com/argoproj/argo-cd/v2/util/errors"
)

var (
	envServerApplicationID = "AAD_SERVER_APPLICATION_ID"
	envEnvironmentName     = "AAD_ENVIRONMENT_NAME"
)

const (
	DEFAULT_AAD_SERVER_APPLICATION_ID = "6dae42f8-4368-4678-94ff-3960e28e3630"
)

func newAzureCommand() *cobra.Command {
	o := token.NewOptions()
	//we'll use default of WorkloadIdentityLogin for the login flow
	o.LoginMethod = token.WorkloadIdentityLogin
	o.ServerID = DEFAULT_AAD_SERVER_APPLICATION_ID
	var command = &cobra.Command{
		Use: "azure",
		Run: func(c *cobra.Command, args []string) {
			o.UpdateFromEnv()
			if v, ok := os.LookupEnv(envServerApplicationID); ok {
				o.ServerID = v
			}
			if v, ok := os.LookupEnv(envEnvironmentName); ok {
				o.Environment = v
			}
			plugin, err := token.New(&o)
			errors.CheckError(err)
			err = plugin.Do()
			errors.CheckError(err)
		},
	}
	return command
}
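The new `azure` subcommand is a client-go exec credential plugin backed by kubelogin's workload identity flow. As a rough illustration only (an assumption, not part of this diff), a kubeconfig user entry built with client-go's `clientcmd/api` types could point at it like this; the `AzurePublicCloud` value simply shows the `AAD_ENVIRONMENT_NAME` override read by the command above:

```go
package example

import (
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// azureExecConfig sketches an exec credential entry that shells out to
// `argocd-k8s-auth azure`. The surrounding cluster configuration flow is not
// shown in this diff; this is illustrative wiring only.
func azureExecConfig() *clientcmdapi.ExecConfig {
	return &clientcmdapi.ExecConfig{
		APIVersion: "client.authentication.k8s.io/v1beta1",
		Command:    "argocd-k8s-auth",
		Args:       []string{"azure"},
		Env: []clientcmdapi.ExecEnvVar{
			{Name: "AAD_ENVIRONMENT_NAME", Value: "AzurePublicCloud"},
		},
	}
}
```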
@@ -55,6 +55,7 @@ func NewCommand() *cobra.Command {
|
||||
argocdRepoServerStrictTLS bool
|
||||
configMapName string
|
||||
secretName string
|
||||
applicationNamespaces []string
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: "controller",
|
||||
@@ -74,26 +75,26 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
restConfig, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to create REST client config: %w", err)
|
||||
}
|
||||
restConfig.UserAgent = fmt.Sprintf("argocd-notifications-controller/%s (%s)", vers.Version, vers.Platform)
|
||||
dynamicClient, err := dynamic.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to create dynamic client: %w", err)
|
||||
}
|
||||
k8sClient, err := kubernetes.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to create Kubernetes client: %w", err)
|
||||
}
|
||||
if namespace == "" {
|
||||
namespace, _, err = clientConfig.Namespace()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to determine controller's host namespace: %w", err)
|
||||
}
|
||||
}
|
||||
level, err := log.ParseLevel(logLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to parse log level: %w", err)
|
||||
}
|
||||
log.SetLevel(level)
|
||||
|
||||
@@ -105,7 +106,7 @@ func NewCommand() *cobra.Command {
|
||||
log.SetFormatter(&log.TextFormatter{ForceColors: true})
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("Unknown log format '%s'", logFormat)
|
||||
return fmt.Errorf("unknown log format '%s'", logFormat)
|
||||
}
|
||||
|
||||
tlsConfig := apiclient.TLSConfiguration{
|
||||
@@ -118,14 +119,14 @@ func NewCommand() *cobra.Command {
|
||||
fmt.Sprintf("%s/reposerver/tls/ca.crt", env.StringFromEnv(common.EnvAppConfigPath, common.DefaultAppConfigPath)),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to load repo-server certificate pool: %w", err)
|
||||
}
|
||||
tlsConfig.Certificates = pool
|
||||
}
|
||||
repoClientset := apiclient.NewRepoServerClientset(argocdRepoServer, 5, tlsConfig)
|
||||
argocdService, err := service.NewArgoCDService(k8sClient, namespace, repoClientset)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to initialize Argo CD service: %w", err)
|
||||
}
|
||||
defer argocdService.Close()
|
||||
|
||||
@@ -138,10 +139,10 @@ func NewCommand() *cobra.Command {
|
||||
log.Infof("serving metrics on port %d", metricsPort)
|
||||
log.Infof("loading configuration %d", metricsPort)
|
||||
|
||||
ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, appLabelSelector, registry, secretName, configMapName)
|
||||
ctrl := notificationscontroller.NewController(k8sClient, dynamicClient, argocdService, namespace, applicationNamespaces, appLabelSelector, registry, secretName, configMapName)
|
||||
err = ctrl.Init(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to initialize controller: %w", err)
|
||||
}
|
||||
|
||||
go ctrl.Run(ctx, processorsCount)
|
||||
@@ -161,5 +162,6 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server")
|
||||
command.Flags().StringVar(&configMapName, "config-map-name", "argocd-notifications-cm", "Set notifications ConfigMap name")
|
||||
command.Flags().StringVar(&secretName, "secret-name", "argocd-notifications-secret", "Set notifications Secret name")
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces that this controller should send notifications for")
|
||||
return &command
|
||||
}
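A recurring change in the notifications controller above replaces bare `return err` with `fmt.Errorf("...: %w", err)`. Wrapping with `%w` adds context while keeping the original error in the chain, so callers can still match on it. A small self-contained illustration (the file path is made up):

```go
package example

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// loadConfig wraps the underlying error with context while preserving it.
func loadConfig(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to load configuration %q: %w", path, err)
	}
	return data, nil
}

func example() {
	_, err := loadConfig("/does/not/exist")
	// The wrapped chain still matches the sentinel from the os package.
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("config file missing:", err)
	}
}
```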
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/argoproj/pkg/stats"
|
||||
@@ -36,33 +35,16 @@ import (
|
||||
|
||||
const (
	// CLIName is the name of the CLI
	cliName = "argocd-repo-server"
	gnuPGSourcePath = "/app/config/gpg/source"

	defaultPauseGenerationAfterFailedGenerationAttempts = 3
	defaultPauseGenerationOnFailureForMinutes = 60
	defaultPauseGenerationOnFailureForRequests = 0
	cliName = "argocd-repo-server"
)

func getGnuPGSourcePath() string {
	return env.StringFromEnv(common.EnvGPGDataPath, gnuPGSourcePath)
}

func getPauseGenerationAfterFailedGenerationAttempts() int {
	return env.ParseNumFromEnv(common.EnvPauseGenerationAfterFailedAttempts, defaultPauseGenerationAfterFailedGenerationAttempts, 0, math.MaxInt32)
}

func getPauseGenerationOnFailureForMinutes() int {
	return env.ParseNumFromEnv(common.EnvPauseGenerationMinutes, defaultPauseGenerationOnFailureForMinutes, 0, math.MaxInt32)
}

func getPauseGenerationOnFailureForRequests() int {
	return env.ParseNumFromEnv(common.EnvPauseGenerationRequests, defaultPauseGenerationOnFailureForRequests, 0, math.MaxInt32)
}

func getSubmoduleEnabled() bool {
	return env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true)
}
var (
	gnuPGSourcePath = env.StringFromEnv(common.EnvGPGDataPath, "/app/config/gpg/source")
	pauseGenerationAfterFailedGenerationAttempts = env.ParseNumFromEnv(common.EnvPauseGenerationAfterFailedAttempts, 3, 0, math.MaxInt32)
	pauseGenerationOnFailureForMinutes = env.ParseNumFromEnv(common.EnvPauseGenerationMinutes, 60, 0, math.MaxInt32)
	pauseGenerationOnFailureForRequests = env.ParseNumFromEnv(common.EnvPauseGenerationRequests, 0, 0, math.MaxInt32)
	gitSubmoduleEnabled = env.ParseBoolFromEnv(common.EnvGitSubmoduleEnabled, true)
)
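Replacing the `getX()` helpers with package-level `var`s changes when the environment is read: the vars are evaluated once at package initialization, whereas the old getters re-read the environment on every call. A minimal sketch of that difference, using a hypothetical `EXAMPLE_LIMIT` variable rather than the real repo-server settings:

```go
package example

import (
	"os"
	"strconv"
)

func parseIntEnv(name string, def int) int {
	if v, err := strconv.Atoi(os.Getenv(name)); err == nil {
		return v
	}
	return def
}

// Evaluated exactly once, when the package is initialized.
var limitEager = parseIntEnv("EXAMPLE_LIMIT", 3)

// Evaluated on every call, so it tracks later changes to the environment
// (mostly relevant for tests that set variables with t.Setenv).
func limitLazy() int {
	return parseIntEnv("EXAMPLE_LIMIT", 3)
}
```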
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
@@ -72,6 +54,7 @@ func NewCommand() *cobra.Command {
|
||||
metricsPort int
|
||||
metricsHost string
|
||||
otlpAddress string
|
||||
otlpAttrs []string
|
||||
cacheSrc func() (*reposervercache.Cache, error)
|
||||
tlsConfigCustomizer tls.ConfigCustomizer
|
||||
tlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)
|
||||
@@ -82,6 +65,8 @@ func NewCommand() *cobra.Command {
|
||||
allowOutOfBoundsSymlinks bool
|
||||
streamedManifestMaxTarSize string
|
||||
streamedManifestMaxExtractedSize string
|
||||
helmManifestMaxExtractedSize string
|
||||
disableManifestMaxExtractedSize bool
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -120,27 +105,31 @@ func NewCommand() *cobra.Command {
|
||||
streamedManifestMaxExtractedSizeQuantity, err := resource.ParseQuantity(streamedManifestMaxExtractedSize)
|
||||
errors.CheckError(err)
|
||||
|
||||
helmManifestMaxExtractedSizeQuantity, err := resource.ParseQuantity(helmManifestMaxExtractedSize)
|
||||
errors.CheckError(err)
|
||||
|
||||
askPassServer := askpass.NewServer()
|
||||
metricsServer := metrics.NewMetricsServer()
|
||||
cacheutil.CollectMetrics(redisClient, metricsServer)
|
||||
server, err := reposerver.NewServer(metricsServer, cache, tlsConfigCustomizer, repository.RepoServerInitConstants{
|
||||
ParallelismLimit: parallelismLimit,
|
||||
PauseGenerationAfterFailedGenerationAttempts: getPauseGenerationAfterFailedGenerationAttempts(),
|
||||
PauseGenerationOnFailureForMinutes: getPauseGenerationOnFailureForMinutes(),
|
||||
PauseGenerationOnFailureForRequests: getPauseGenerationOnFailureForRequests(),
|
||||
SubmoduleEnabled: getSubmoduleEnabled(),
|
||||
PauseGenerationAfterFailedGenerationAttempts: pauseGenerationAfterFailedGenerationAttempts,
|
||||
PauseGenerationOnFailureForMinutes: pauseGenerationOnFailureForMinutes,
|
||||
PauseGenerationOnFailureForRequests: pauseGenerationOnFailureForRequests,
|
||||
SubmoduleEnabled: gitSubmoduleEnabled,
|
||||
MaxCombinedDirectoryManifestsSize: maxCombinedDirectoryManifestsQuantity,
|
||||
CMPTarExcludedGlobs: cmpTarExcludedGlobs,
|
||||
AllowOutOfBoundsSymlinks: allowOutOfBoundsSymlinks,
|
||||
StreamedManifestMaxExtractedSize: streamedManifestMaxExtractedSizeQuantity.ToDec().Value(),
|
||||
StreamedManifestMaxTarSize: streamedManifestMaxTarSizeQuantity.ToDec().Value(),
|
||||
HelmManifestMaxExtractedSize: helmManifestMaxExtractedSizeQuantity.ToDec().Value(),
|
||||
}, askPassServer)
|
||||
errors.CheckError(err)
|
||||
|
||||
if otlpAddress != "" {
|
||||
var closer func()
|
||||
var err error
|
||||
closer, err = traceutil.InitTracer(ctx, "argocd-repo-server", otlpAddress)
|
||||
closer, err = traceutil.InitTracer(ctx, "argocd-repo-server", otlpAddress, otlpAttrs)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize tracing: %v", err)
|
||||
}
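The size strings parsed above ("10M", "1G", ...) go through `k8s.io/apimachinery`'s resource quantities and are then flattened to plain byte counts with `ToDec().Value()`, exactly as in the `RepoServerInitConstants` fields. A short standalone sketch of that conversion:

```go
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func example() {
	// "10M" is a decimal SI quantity: 10 * 1000 * 1000 bytes.
	q, err := resource.ParseQuantity("10M")
	if err != nil {
		panic(err)
	}
	fmt.Println(q.ToDec().Value()) // 10000000
}
```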
|
||||
@@ -182,12 +171,12 @@ func NewCommand() *cobra.Command {
|
||||
err = gpg.InitializeGnuPG()
|
||||
errors.CheckError(err)
|
||||
|
||||
log.Infof("Populating GnuPG keyring with keys from %s", getGnuPGSourcePath())
|
||||
added, removed, err := gpg.SyncKeyRingFromDirectory(getGnuPGSourcePath())
|
||||
log.Infof("Populating GnuPG keyring with keys from %s", gnuPGSourcePath)
|
||||
added, removed, err := gpg.SyncKeyRingFromDirectory(gnuPGSourcePath)
|
||||
errors.CheckError(err)
|
||||
log.Infof("Loaded %d (and removed %d) keys from keyring", len(added), len(removed))
|
||||
|
||||
go func() { errors.CheckError(reposerver.StartGPGWatcher(getGnuPGSourcePath())) }()
|
||||
go func() { errors.CheckError(reposerver.StartGPGWatcher(gnuPGSourcePath)) }()
|
||||
}
|
||||
|
||||
log.Infof("argocd-repo-server is listening on %s", listener.Addr())
|
||||
@@ -199,9 +188,6 @@ func NewCommand() *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
if cmdutil.LogFormat == "" {
|
||||
cmdutil.LogFormat = os.Getenv("ARGOCD_REPO_SERVER_LOGLEVEL")
|
||||
}
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_REPO_SERVER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_REPO_SERVER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().Int64Var(¶llelismLimit, "parallelismlimit", int64(env.ParseNumFromEnv("ARGOCD_REPO_SERVER_PARALLELISM_LIMIT", 0, 0, math.MaxInt32)), "Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit.")
|
||||
@@ -210,12 +196,15 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&metricsHost, "metrics-address", env.StringFromEnv("ARGOCD_REPO_SERVER_METRICS_LISTEN_ADDRESS", common.DefaultAddressRepoServerMetrics), "Listen on given address for metrics")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortRepoServerMetrics, "Start metrics server on given port")
|
||||
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_REPO_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
|
||||
command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_REPO_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)")
|
||||
command.Flags().BoolVar(&disableTLS, "disable-tls", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_TLS", false), "Disable TLS on the gRPC endpoint")
|
||||
command.Flags().StringVar(&maxCombinedDirectoryManifestsSize, "max-combined-directory-manifests-size", env.StringFromEnv("ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE", "10M"), "Max combined size of manifest files in a directory-type Application")
|
||||
command.Flags().StringArrayVar(&cmpTarExcludedGlobs, "plugin-tar-exclude", env.StringsFromEnv("ARGOCD_REPO_SERVER_PLUGIN_TAR_EXCLUSIONS", []string{}, ";"), "Globs to filter when sending tarballs to plugins.")
|
||||
command.Flags().BoolVar(&allowOutOfBoundsSymlinks, "allow-oob-symlinks", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_ALLOW_OUT_OF_BOUNDS_SYMLINKS", false), "Allow out-of-bounds symlinks in repositories (not recommended)")
|
||||
command.Flags().StringVar(&streamedManifestMaxTarSize, "streamed-manifest-max-tar-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_TAR_SIZE", "100M"), "Maximum size of streamed manifest archives")
|
||||
command.Flags().StringVar(&streamedManifestMaxExtractedSize, "streamed-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of streamed manifest archives when extracted")
|
||||
command.Flags().StringVar(&helmManifestMaxExtractedSize, "helm-manifest-max-extracted-size", env.StringFromEnv("ARGOCD_REPO_SERVER_HELM_MANIFEST_MAX_EXTRACTED_SIZE", "1G"), "Maximum size of helm manifest archives when extracted")
|
||||
command.Flags().BoolVar(&disableManifestMaxExtractedSize, "disable-helm-manifest-max-extracted-size", env.ParseBoolFromEnv("ARGOCD_REPO_SERVER_DISABLE_HELM_MANIFEST_MAX_EXTRACTED_SIZE", false), "Disable maximum size of helm manifest archives when extracted")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(&command)
|
||||
cacheSrc = reposervercache.AddCacheFlagsToCmd(&command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
|
||||
@@ -35,15 +35,10 @@ const (
)

var (
	failureRetryCount = 0
	failureRetryPeriodMilliSeconds = 100
	failureRetryCount = env.ParseNumFromEnv(failureRetryCountEnv, 0, 0, 10)
	failureRetryPeriodMilliSeconds = env.ParseNumFromEnv(failureRetryPeriodMilliSecondsEnv, 100, 0, 1000)
)

func init() {
	failureRetryCount = env.ParseNumFromEnv(failureRetryCountEnv, failureRetryCount, 0, 10)
	failureRetryPeriodMilliSeconds = env.ParseNumFromEnv(failureRetryPeriodMilliSecondsEnv, failureRetryPeriodMilliSeconds, 0, 1000)
}
|
||||
|
||||
// NewCommand returns a new instance of an argocd command
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
@@ -54,6 +49,7 @@ func NewCommand() *cobra.Command {
|
||||
metricsHost string
|
||||
metricsPort int
|
||||
otlpAddress string
|
||||
otlpAttrs []string
|
||||
glogLevel int
|
||||
clientConfig clientcmd.ClientConfig
|
||||
repoServerTimeoutSeconds int
|
||||
@@ -203,7 +199,7 @@ func NewCommand() *cobra.Command {
|
||||
var closer func()
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
if otlpAddress != "" {
|
||||
closer, err = traceutil.InitTracer(ctx, "argocd-server", otlpAddress)
|
||||
closer, err = traceutil.InitTracer(ctx, "argocd-server", otlpAddress, otlpAttrs)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize tracing: %v", err)
|
||||
}
|
||||
@@ -235,6 +231,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&metricsHost, env.StringFromEnv("ARGOCD_SERVER_METRICS_LISTEN_ADDRESS", "metrics-address"), common.DefaultAddressAPIServerMetrics, "Listen for metrics on given address")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", common.DefaultPortArgoCDAPIServerMetrics, "Start metrics on given port")
|
||||
command.Flags().StringVar(&otlpAddress, "otlp-address", env.StringFromEnv("ARGOCD_SERVER_OTLP_ADDRESS", ""), "OpenTelemetry collector address to send traces to")
|
||||
command.Flags().StringSliceVar(&otlpAttrs, "otlp-attrs", env.StringsFromEnv("ARGOCD_SERVER_OTLP_ATTRS", []string{}, ","), "List of OpenTelemetry collector extra attrs when send traces, each attribute is separated by a colon(e.g. key:value)")
|
||||
command.Flags().IntVar(&repoServerTimeoutSeconds, "repo-server-timeout-seconds", env.ParseNumFromEnv("ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS", 60, 0, math.MaxInt64), "Repo server RPC call timeout seconds.")
|
||||
command.Flags().StringVar(&frameOptions, "x-frame-options", env.StringFromEnv("ARGOCD_SERVER_X_FRAME_OPTIONS", "sameorigin"), "Set X-Frame-Options header in HTTP responses to `value`. To disable, set to \"\".")
|
||||
command.Flags().StringVar(&contentSecurityPolicy, "content-security-policy", env.StringFromEnv("ARGOCD_SERVER_CONTENT_SECURITY_POLICY", "frame-ancestors 'self';"), "Set Content-Security-Policy header in HTTP responses to `value`. To disable, set to \"\".")
|
||||
|
||||
@@ -130,9 +130,9 @@ has appropriate RBAC permissions to change other accounts.
|
||||
},
|
||||
}
|
||||
|
||||
command.Flags().StringVar(¤tPassword, "current-password", "", "password of the currently logged on user")
|
||||
command.Flags().StringVar(&newPassword, "new-password", "", "new password you want to update to")
|
||||
command.Flags().StringVar(&account, "account", "", "an account name that should be updated. Defaults to current user account")
|
||||
command.Flags().StringVar(¤tPassword, "current-password", "", "Password of the currently logged on user")
|
||||
command.Flags().StringVar(&newPassword, "new-password", "", "New password you want to update to")
|
||||
command.Flags().StringVar(&account, "account", "", "An account name that should be updated. Defaults to current user account")
|
||||
return command
|
||||
}
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
|
||||
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
|
||||
@@ -35,7 +36,7 @@ var (
|
||||
)
|
||||
|
||||
// NewAdminCommand returns a new instance of an argocd command
|
||||
func NewAdminCommand() *cobra.Command {
|
||||
func NewAdminCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
pathOpts = clientcmd.NewDefaultPathOptions()
|
||||
)
|
||||
@@ -49,10 +50,10 @@ func NewAdminCommand() *cobra.Command {
|
||||
},
|
||||
}
|
||||
|
||||
command.AddCommand(NewClusterCommand(pathOpts))
|
||||
command.AddCommand(NewClusterCommand(clientOpts, pathOpts))
|
||||
command.AddCommand(NewProjectsCommand())
|
||||
command.AddCommand(NewSettingsCommand())
|
||||
command.AddCommand(NewAppCommand())
|
||||
command.AddCommand(NewAppCommand(clientOpts))
|
||||
command.AddCommand(NewRepoCommand())
|
||||
command.AddCommand(NewImportCommand())
|
||||
command.AddCommand(NewExportCommand())
|
||||
|
||||
@@ -20,13 +20,15 @@ import (
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/controller"
|
||||
"github.com/argoproj/argo-cd/v2/controller/cache"
|
||||
"github.com/argoproj/argo-cd/v2/controller/metrics"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
appinformers "github.com/argoproj/argo-cd/v2/pkg/client/informers/externalversions"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
reposerverclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
@@ -39,7 +41,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/settings"
|
||||
)
|
||||
|
||||
func NewAppCommand() *cobra.Command {
|
||||
func NewAppCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "app",
|
||||
Short: "Manage applications configuration",
|
||||
@@ -49,7 +51,7 @@ func NewAppCommand() *cobra.Command {
|
||||
}
|
||||
|
||||
command.AddCommand(NewGenAppSpecCommand())
|
||||
command.AddCommand(NewReconcileCommand())
|
||||
command.AddCommand(NewReconcileCommand(clientOpts))
|
||||
command.AddCommand(NewDiffReconcileResults())
|
||||
return command
|
||||
}
|
||||
@@ -193,14 +195,14 @@ func diffReconcileResults(res1 reconcileResults, res2 reconcileResults) error {
|
||||
for k, v := range resMap1 {
|
||||
firstUn, err := toUnstructured(v)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error converting first resource to unstructured: %w", err)
|
||||
}
|
||||
var secondUn *unstructured.Unstructured
|
||||
second, ok := resMap2[k]
|
||||
if ok {
|
||||
secondUn, err = toUnstructured(second)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error converting second resource to unstructured: %w", err)
|
||||
}
|
||||
delete(resMap2, k)
|
||||
}
|
||||
@@ -224,7 +226,7 @@ func diffReconcileResults(res1 reconcileResults, res2 reconcileResults) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewReconcileCommand() *cobra.Command {
|
||||
func NewReconcileCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
selector string
|
||||
@@ -259,11 +261,12 @@ func NewReconcileCommand() *cobra.Command {
|
||||
if repoServerAddress == "" {
|
||||
printLine("Repo server is not provided, trying to port-forward to argocd-repo-server pod.")
|
||||
overrides := clientcmd.ConfigOverrides{}
|
||||
repoServerPort, err := kubeutil.PortForward(8081, namespace, &overrides, "app.kubernetes.io/name=argocd-repo-server")
|
||||
repoServerPodLabelSelector := common.LabelKeyAppName + "=" + clientOpts.RepoServerName
|
||||
repoServerPort, err := kubeutil.PortForward(8081, namespace, &overrides, repoServerPodLabelSelector)
|
||||
errors.CheckError(err)
|
||||
repoServerAddress = fmt.Sprintf("localhost:%d", repoServerPort)
|
||||
}
|
||||
repoServerClient := argocdclient.NewRepoServerClientset(repoServerAddress, 60, argocdclient.TLSConfiguration{DisableTLS: false, StrictValidation: false})
|
||||
repoServerClient := reposerverclient.NewRepoServerClientset(repoServerAddress, 60, reposerverclient.TLSConfiguration{DisableTLS: false, StrictValidation: false})
|
||||
|
||||
appClientset := appclientset.NewForConfigOrDie(cfg)
|
||||
kubeClientset := kubernetes.NewForConfigOrDie(cfg)
|
||||
@@ -328,7 +331,7 @@ func reconcileApplications(
|
||||
kubeClientset kubernetes.Interface,
|
||||
appClientset appclientset.Interface,
|
||||
namespace string,
|
||||
repoServerClient argocdclient.Clientset,
|
||||
repoServerClient reposerverclient.Clientset,
|
||||
selector string,
|
||||
createLiveStateCache func(argoDB db.ArgoDB, appInformer kubecache.SharedIndexInformer, settingsMgr *settings.SettingsManager, server *metrics.MetricsServer) cache.LiveStateCache,
|
||||
) ([]appReconcileResult, error) {
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/controller/sharding"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
@@ -39,7 +40,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/text/label"
|
||||
)
|
||||
|
||||
func NewClusterCommand(pathOpts *clientcmd.PathOptions) *cobra.Command {
|
||||
func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
|
||||
var command = &cobra.Command{
|
||||
Use: "cluster",
|
||||
Short: "Manage clusters configuration",
|
||||
@@ -50,8 +51,8 @@ func NewClusterCommand(pathOpts *clientcmd.PathOptions) *cobra.Command {
|
||||
|
||||
command.AddCommand(NewClusterConfig())
|
||||
command.AddCommand(NewGenClusterConfigCommand(pathOpts))
|
||||
command.AddCommand(NewClusterStatsCommand())
|
||||
command.AddCommand(NewClusterShardsCommand())
|
||||
command.AddCommand(NewClusterStatsCommand(clientOpts))
|
||||
command.AddCommand(NewClusterShardsCommand(clientOpts))
|
||||
namespacesCommand := NewClusterNamespacesCommand()
|
||||
namespacesCommand.AddCommand(NewClusterEnableNamespacedMode())
|
||||
namespacesCommand.AddCommand(NewClusterDisableNamespacedMode())
|
||||
@@ -68,7 +69,7 @@ type ClusterWithInfo struct {
|
||||
Namespaces []string
|
||||
}
|
||||
|
||||
func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int) ([]ClusterWithInfo, error) {
|
||||
func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string) ([]ClusterWithInfo, error) {
|
||||
settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)
|
||||
|
||||
argoDB := db.NewDB(namespace, settingsMgr, kubeClient)
|
||||
@@ -79,8 +80,10 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
|
||||
var cache *appstatecache.Cache
|
||||
if portForwardRedis {
|
||||
overrides := clientcmd.ConfigOverrides{}
|
||||
redisHaProxyPodLabelSelector := common.LabelKeyAppName + "=" + redisHaProxyName
|
||||
redisPodLabelSelector := common.LabelKeyAppName + "=" + redisName
|
||||
port, err := kubeutil.PortForward(6379, namespace, &overrides,
|
||||
"app.kubernetes.io/name=argocd-redis-ha-haproxy", "app.kubernetes.io/name=argocd-redis")
|
||||
redisHaProxyPodLabelSelector, redisPodLabelSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -146,16 +149,17 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
|
||||
return clusters, nil
|
||||
}
|
||||
|
||||
func getControllerReplicas(ctx context.Context, kubeClient *kubernetes.Clientset, namespace string) (int, error) {
|
||||
func getControllerReplicas(ctx context.Context, kubeClient *kubernetes.Clientset, namespace string, appControllerName string) (int, error) {
|
||||
appControllerPodLabelSelector := common.LabelKeyAppName + "=" + appControllerName
|
||||
controllerPods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{
|
||||
LabelSelector: "app.kubernetes.io/name=argocd-application-controller"})
|
||||
LabelSelector: appControllerPodLabelSelector})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(controllerPods.Items), nil
|
||||
}
|
||||
|
||||
func NewClusterShardsCommand() *cobra.Command {
|
||||
func NewClusterShardsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
shard int
|
||||
replicas int
|
||||
@@ -179,14 +183,14 @@ func NewClusterShardsCommand() *cobra.Command {
|
||||
appClient := versioned.NewForConfigOrDie(clientCfg)
|
||||
|
||||
if replicas == 0 {
|
||||
replicas, err = getControllerReplicas(ctx, kubeClient, namespace)
|
||||
replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
if replicas == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard)
|
||||
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName)
|
||||
errors.CheckError(err)
|
||||
if len(clusters) == 0 {
|
||||
return
|
||||
@@ -433,7 +437,7 @@ func NewClusterDisableNamespacedMode() *cobra.Command {
|
||||
return &command
|
||||
}
|
||||
|
||||
func NewClusterStatsCommand() *cobra.Command {
|
||||
func NewClusterStatsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
shard int
|
||||
replicas int
|
||||
@@ -457,10 +461,10 @@ func NewClusterStatsCommand() *cobra.Command {
|
||||
kubeClient := kubernetes.NewForConfigOrDie(clientCfg)
|
||||
appClient := versioned.NewForConfigOrDie(clientCfg)
|
||||
if replicas == 0 {
|
||||
replicas, err = getControllerReplicas(ctx, kubeClient, namespace)
|
||||
replicas, err = getControllerReplicas(ctx, kubeClient, namespace, clientOpts.AppControllerName)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard)
|
||||
clusters, err := loadClusters(ctx, kubeClient, appClient, replicas, namespace, portForwardRedis, cacheSrc, shard, clientOpts.RedisName, clientOpts.RedisHaProxyName)
|
||||
errors.CheckError(err)
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
|
||||
@@ -568,7 +568,7 @@ argocd admin settings resource-overrides action list /tmp/deploy.yaml --argocd-c
|
||||
})
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
_, _ = fmt.Fprintf(w, "NAME\tENABLED\n")
|
||||
_, _ = fmt.Fprintf(w, "NAME\tDISABLED\n")
|
||||
for _, action := range availableActions {
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\n", action.Name, strconv.FormatBool(action.Disabled))
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -373,6 +374,9 @@ func resolveRBACResourceName(name string) string {
|
||||
|
||||
// isValidRBACAction checks whether a given action is a valid RBAC action
|
||||
func isValidRBACAction(action string) bool {
|
||||
if strings.HasPrefix(action, rbacpolicy.ActionAction+"/") {
|
||||
return true
|
||||
}
|
||||
_, ok := validRBACActions[action]
|
||||
return ok
|
||||
}
|
||||
|
||||
@@ -27,6 +27,11 @@ func Test_isValidRBACAction(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func Test_isValidRBACAction_ActionAction(t *testing.T) {
|
||||
ok := isValidRBACAction("action/apps/Deployment/restart")
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func Test_isValidRBACResource(t *testing.T) {
|
||||
for k := range validRBACResources {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
|
||||
@@ -393,7 +393,7 @@ func TestResourceOverrideAction(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, `NAME ENABLED
|
||||
assert.Contains(t, out, `NAME DISABLED
|
||||
restart false
|
||||
resume false
|
||||
`)
|
||||
@@ -440,7 +440,7 @@ resume false
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, out, "NAME")
|
||||
assert.Contains(t, out, "ENABLED")
|
||||
assert.Contains(t, out, "DISABLED")
|
||||
assert.Contains(t, out, "create-a-job")
|
||||
assert.Contains(t, out, "false")
|
||||
})
|
||||
|
||||
@@ -259,6 +259,52 @@ func hasAppChanged(appReq, appRes *argoappv1.Application, upsert bool) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func parentChildDetails(appIf application.ApplicationServiceClient, ctx context.Context, appName string, appNs string) (map[string]argoappv1.ResourceNode, map[string][]string, map[string]struct{}) {

	mapUidToNode := make(map[string]argoappv1.ResourceNode)
	mapParentToChild := make(map[string][]string)
	parentNode := make(map[string]struct{})

	resourceTree, err := appIf.ResourceTree(ctx, &application.ResourcesQuery{Name: &appName, AppNamespace: &appNs, ApplicationName: &appName})
	errors.CheckError(err)

	for _, node := range resourceTree.Nodes {
		mapUidToNode[node.UID] = node

		if len(node.ParentRefs) > 0 {
			_, ok := mapParentToChild[node.ParentRefs[0].UID]
			if !ok {
				var temp []string
				mapParentToChild[node.ParentRefs[0].UID] = temp
			}
			mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID)
		} else {
			parentNode[node.UID] = struct{}{}
		}
	}
	return mapUidToNode, mapParentToChild, parentNode
}
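`parentChildDetails` only builds the lookup tables; the rendering itself happens in `printTreeView`/`printTreeViewDetailed` further down. A minimal sketch of how the three maps can be walked depth-first to print an indented tree, reusing the same `ResourceNode` type (the traversal is illustrative, not the actual `treeViewAppGet` helper):

```go
package example

import (
	"fmt"

	argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// printTree walks the parent/child mapping produced by parentChildDetails,
// indenting each level by two spaces.
func printTree(prefix, uid string, nodes map[string]argoappv1.ResourceNode, children map[string][]string) {
	node := nodes[uid]
	fmt.Printf("%s%s/%s\n", prefix, node.Kind, node.Name)
	for _, childUID := range children[uid] {
		printTree(prefix+"  ", childUID, nodes, children)
	}
}

// printAll starts one tree per root (a node without parent refs).
func printAll(nodes map[string]argoappv1.ResourceNode, children map[string][]string, roots map[string]struct{}) {
	for uid := range roots {
		printTree("", uid, nodes, children)
	}
}
```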
|
||||
|
||||
func printHeader(acdClient argocdclient.Client, app *argoappv1.Application, ctx context.Context, windows *argoappv1.SyncWindows, showOperation bool, showParams bool) {
|
||||
aURL := appURL(ctx, acdClient, app.Name)
|
||||
printAppSummaryTable(app, aURL, windows)
|
||||
|
||||
if len(app.Status.Conditions) > 0 {
|
||||
fmt.Println()
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
printAppConditions(w, app)
|
||||
_ = w.Flush()
|
||||
fmt.Println()
|
||||
}
|
||||
if showOperation && app.Status.OperationState != nil {
|
||||
fmt.Println()
|
||||
printOperationResult(app.Status.OperationState)
|
||||
}
|
||||
if showParams {
|
||||
printParams(app)
|
||||
}
|
||||
}
|
||||
|
||||
// NewApplicationGetCommand returns a new instance of an `argocd app get` command
|
||||
func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
@@ -273,7 +319,6 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
Short: "Get application details",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) == 0 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -283,11 +328,13 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
defer argoio.Close(conn)
|
||||
|
||||
appName, appNs := argo.ParseFromQualifiedName(args[0], "")
|
||||
|
||||
app, err := appIf.Get(ctx, &application.ApplicationQuery{
|
||||
Name: &appName,
|
||||
Refresh: getRefreshType(refresh, hardRefresh),
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
|
||||
errors.CheckError(err)
|
||||
|
||||
pConn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
|
||||
@@ -302,35 +349,33 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
err := PrintResource(app, output)
|
||||
errors.CheckError(err)
|
||||
case "wide", "":
|
||||
aURL := appURL(ctx, acdClient, app.Name)
|
||||
printAppSummaryTable(app, aURL, windows)
|
||||
|
||||
if len(app.Status.Conditions) > 0 {
|
||||
fmt.Println()
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
printAppConditions(w, app)
|
||||
_ = w.Flush()
|
||||
fmt.Println()
|
||||
}
|
||||
if showOperation && app.Status.OperationState != nil {
|
||||
fmt.Println()
|
||||
printOperationResult(app.Status.OperationState)
|
||||
}
|
||||
if showParams {
|
||||
printParams(app)
|
||||
}
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
|
||||
if len(app.Status.Resources) > 0 {
|
||||
fmt.Println()
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
printAppResources(w, app)
|
||||
_ = w.Flush()
|
||||
}
|
||||
case "tree":
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
|
||||
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
|
||||
if len(mapUidToNode) > 0 {
|
||||
fmt.Println()
|
||||
printTreeView(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
|
||||
}
|
||||
case "tree=detailed":
|
||||
printHeader(acdClient, app, ctx, windows, showOperation, showParams)
|
||||
mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
|
||||
if len(mapUidToNode) > 0 {
|
||||
fmt.Println()
|
||||
printTreeViewDetailed(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
|
||||
}
|
||||
default:
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
}
|
||||
},
|
||||
}
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree")
|
||||
command.Flags().BoolVar(&showOperation, "show-operation", false, "Show application operation")
|
||||
command.Flags().BoolVar(&showParams, "show-params", false, "Show application parameters and overrides")
|
||||
command.Flags().BoolVar(&refresh, "refresh", false, "Refresh application data when retrieving")
|
||||
@@ -420,12 +465,12 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().StringVar(&kind, "kind", "", "Resource kind")
|
||||
command.Flags().StringVar(&namespace, "namespace", "", "Resource namespace")
|
||||
command.Flags().StringVar(&resourceName, "name", "", "Resource name")
|
||||
command.Flags().BoolVar(&follow, "follow", false, "Specify if the logs should be streamed")
|
||||
command.Flags().BoolVarP(&follow, "follow", "f", false, "Specify if the logs should be streamed")
|
||||
command.Flags().Int64Var(&tail, "tail", 0, "The number of lines from the end of the logs to show")
|
||||
command.Flags().Int64Var(&sinceSeconds, "since-seconds", 0, "A relative time in seconds before the current time from which to show logs")
|
||||
command.Flags().StringVar(&untilTime, "until-time", "", "Show logs until this time")
|
||||
command.Flags().StringVar(&filter, "filter", "", "Show logs contain this string")
|
||||
command.Flags().StringVar(&container, "container", "", "Optional container name")
|
||||
command.Flags().StringVarP(&container, "container", "c", "", "Optional container name")
|
||||
command.Flags().BoolVarP(&previous, "previous", "p", false, "Specify if the previously terminated container logs should be returned")
|
||||
|
||||
return command
|
||||
@@ -1027,7 +1072,7 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
items := make([]objKeyLiveTarget, 0)
|
||||
if diffOptions.local != "" {
|
||||
localObjs := groupObjsByKey(getLocalObjects(ctx, app, proj, diffOptions.local, diffOptions.localRepoRoot, argoSettings.AppLabelKey, diffOptions.cluster.Info.ServerVersion, diffOptions.cluster.Info.APIVersions, argoSettings.KustomizeOptions, argoSettings.TrackingMethod), liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace))
|
||||
items = groupObjsForDiff(resources, localObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
} else if diffOptions.revision != "" {
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.res.Manifests {
|
||||
@@ -1036,7 +1081,7 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace))
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
} else if diffOptions.serversideRes != nil {
|
||||
var unstructureds []*unstructured.Unstructured
|
||||
for _, mfst := range diffOptions.serversideRes.Manifests {
|
||||
@@ -1045,7 +1090,7 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
unstructureds = append(unstructureds, obj)
|
||||
}
|
||||
groupedObjs := groupObjsByKey(unstructureds, liveObjs, app.Spec.Destination.Namespace)
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace))
|
||||
items = groupObjsForDiff(resources, groupedObjs, items, argoSettings, app.InstanceName(argoSettings.ControllerNamespace), app.Spec.Destination.Namespace)
|
||||
} else {
|
||||
for i := range resources.Items {
|
||||
res := resources.Items[i]
|
||||
@@ -1105,7 +1150,7 @@ func findandPrintDiff(ctx context.Context, app *argoappv1.Application, proj *arg
|
||||
return foundDiffs
|
||||
}
|
||||
|
||||
func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[kube.ResourceKey]*unstructured.Unstructured, items []objKeyLiveTarget, argoSettings *settings.Settings, appName string) []objKeyLiveTarget {
|
||||
func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[kube.ResourceKey]*unstructured.Unstructured, items []objKeyLiveTarget, argoSettings *settings.Settings, appName, namespace string) []objKeyLiveTarget {
|
||||
resourceTracking := argo.NewResourceTracking()
|
||||
for _, res := range resources.Items {
|
||||
var live = &unstructured.Unstructured{}
|
||||
@@ -1120,7 +1165,7 @@ func groupObjsForDiff(resources *application.ManagedResourcesResponse, objs map[
|
||||
}
|
||||
if local, ok := objs[key]; ok || live != nil {
|
||||
if local != nil && !kube.IsCRD(local) {
|
||||
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, "", argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()))
|
||||
err = resourceTracking.SetAppInstance(local, argoSettings.AppLabelKey, appName, namespace, argoappv1.TrackingMethod(argoSettings.GetTrackingMethod()))
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
@@ -1449,6 +1494,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
timeout uint
|
||||
selector string
|
||||
resources []string
|
||||
output string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "wait [APPNAME.. | -l selector]",
|
||||
@@ -1497,7 +1543,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
}
|
||||
for _, appName := range appNames {
|
||||
_, _, err := waitOnApplicationStatus(ctx, acdClient, appName, timeout, watch, selectedResources)
|
||||
_, _, err := waitOnApplicationStatus(ctx, acdClient, appName, timeout, watch, selectedResources, output)
|
||||
errors.CheckError(err)
|
||||
}
|
||||
},
|
||||
@@ -1510,6 +1556,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator))
|
||||
command.Flags().BoolVar(&watch.operation, "operation", false, "Wait for pending operations")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -1521,6 +1568,24 @@ func printAppResources(w io.Writer, app *argoappv1.Application) {
|
||||
}
|
||||
}
|
||||
|
||||
func printTreeView(nodeMapping map[string]argoappv1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, mapNodeNameToResourceState map[string]*resourceState) {
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
_, _ = fmt.Fprintf(w, "KIND/NAME\tSTATUS\tHEALTH\tMESSAGE\n")
|
||||
for uid := range parentNodes {
|
||||
treeViewAppGet("", nodeMapping, parentChildMapping, nodeMapping[uid], mapNodeNameToResourceState, w)
|
||||
}
|
||||
_ = w.Flush()
|
||||
}
|
||||
|
||||
func printTreeViewDetailed(nodeMapping map[string]argoappv1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, mapNodeNameToResourceState map[string]*resourceState) {
|
||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
|
||||
fmt.Fprintf(w, "KIND/NAME\tSTATUS\tHEALTH\tAGE\tMESSAGE\tREASON\n")
|
||||
for uid := range parentNodes {
|
||||
detailedTreeViewAppGet("", nodeMapping, parentChildMapping, nodeMapping[uid], mapNodeNameToResourceState, w)
|
||||
}
|
||||
_ = w.Flush()
|
||||
}
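Both tree printers rely on `text/tabwriter` for column alignment: rows are buffered with tab-separated cells and lined up when `Flush` is called. A tiny standalone example with made-up rows:

```go
package main

import (
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	// Cells are separated by tabs; columns line up once Flush runs.
	_, _ = w.Write([]byte("KIND/NAME\tSTATUS\tHEALTH\n"))
	_, _ = w.Write([]byte("Deployment/guestbook\tSynced\tHealthy\n"))
	_, _ = w.Write([]byte("  ReplicaSet/guestbook-7d9f\tSynced\tHealthy\n"))
	_ = w.Flush()
}
```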
|
||||
|
||||
// NewApplicationSyncCommand returns a new instance of an `argocd app sync` command
|
||||
func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
@@ -1535,6 +1600,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
force bool
|
||||
replace bool
|
||||
serverSideApply bool
|
||||
applyOutOfSyncOnly bool
|
||||
async bool
|
||||
retryLimit int64
|
||||
retryBackoffDuration time.Duration
|
||||
@@ -1546,6 +1612,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
diffChanges bool
|
||||
diffChangesConfirm bool
|
||||
projects []string
|
||||
output string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "sync [APPNAME... | -l selector | --project project-name]",
|
||||
@@ -1574,17 +1641,13 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
argocd app sync my-app --resource argoproj.io:Rollout:my-namespace/my-rollout`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) == 0 && selector == "" && len(projects) == 0 {
|
||||
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if len(args) > 1 && selector != "" {
|
||||
log.Fatal("Cannot use selector option when application name(s) passed as argument(s)")
|
||||
}
|
||||
|
||||
acdClient := headless.NewClientOrDie(clientOpts, c)
|
||||
conn, appIf := acdClient.NewApplicationClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
@@ -1629,6 +1692,8 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("The name of the app is ", appName)
|
||||
|
||||
for _, mfst := range res.Manifests {
|
||||
obj, err := argoappv1.UnmarshalToUnstructured(mfst)
|
||||
errors.CheckError(err)
|
||||
@@ -1661,8 +1726,15 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.HasMultipleSources() {
|
||||
log.Fatal("argocd cli does not work on multi-source app")
|
||||
return
|
||||
if revision != "" {
|
||||
log.Fatal("argocd cli does not work on multi-source app with --revision flag")
|
||||
return
|
||||
}
|
||||
|
||||
if local != "" {
|
||||
log.Fatal("argocd cli does not work on multi-source app with --local flag")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// filters out only those resources that need to be synced
|
||||
@@ -1707,6 +1779,9 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
if serverSideApply {
|
||||
items = append(items, common.SyncOptionServerSideApply)
|
||||
}
|
||||
if applyOutOfSyncOnly {
|
||||
items = append(items, common.SyncOptionApplyOutOfSyncOnly)
|
||||
}
|
||||
|
||||
if len(items) == 0 {
|
||||
// avoid sending an empty array when it is not needed
|
||||
@@ -1778,7 +1853,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
errors.CheckError(err)
|
||||
|
||||
if !async {
|
||||
app, opState, err := waitOnApplicationStatus(ctx, acdClient, appQualifiedName, timeout, watchOpts{operation: true}, selectedResources)
|
||||
app, opState, err := waitOnApplicationStatus(ctx, acdClient, appQualifiedName, timeout, watchOpts{operation: true}, selectedResources, output)
|
||||
errors.CheckError(err)
|
||||
|
||||
if !dryRun {
|
||||
@@ -1811,6 +1886,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&force, "force", false, "Use a force apply")
|
||||
command.Flags().BoolVar(&replace, "replace", false, "Use a kubectl create/replace instead of apply")
|
||||
command.Flags().BoolVar(&serverSideApply, "server-side", false, "Use server-side apply while syncing the application")
|
||||
command.Flags().BoolVar(&applyOutOfSyncOnly, "apply-out-of-sync-only", false, "Sync only out-of-sync resources")
|
||||
command.Flags().BoolVar(&async, "async", false, "Do not wait for application to sync before continuing")
|
||||
command.Flags().StringVar(&local, "local", "", "Path to a local directory. When this flag is present no git queries will be made")
|
||||
command.Flags().StringVar(&localRepoRoot, "local-repo-root", "/", "Path to the repository root. Used together with --local allows setting the repository root")
|
||||
@@ -1818,6 +1894,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&diffChangesConfirm, "assumeYes", false, "Assume yes as answer for all user queries or prompts")
|
||||
command.Flags().BoolVar(&diffChanges, "preview-changes", false, "Preview difference against the target and live state before syncing app and wait for user confirmation")
|
||||
command.Flags().StringArrayVar(&projects, "project", []string{}, "Sync apps that belong to the specified projects. This option may be specified repeatedly.")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed")
|
||||
return command
|
||||
}
|
||||
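With the --output flag registered above, the sync command can render its final resource listing as a flat table, JSON/YAML, or a tree. A hypothetical invocation (the application name is illustrative), in the same style as the command's existing examples:

argocd app sync guestbook --output tree
argocd app sync guestbook --output tree=detailed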
|
||||
@@ -1998,12 +2075,26 @@ func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string
|
||||
return synced && healthCheckPassed && operational
|
||||
}
|
||||
|
||||
// resourceParentChild gets the latest state of the app and the latest state of the app's resource tree and then
// constructs the necessary data structures to print the app as a tree.
func resourceParentChild(ctx context.Context, acdClient argocdclient.Client, appName string, appNs string) (map[string]argoappv1.ResourceNode, map[string][]string, map[string]struct{}, map[string]*resourceState) {
    _, appIf := acdClient.NewApplicationClientOrDie()
    mapUidToNode, mapParentToChild, parentNode := parentChildDetails(appIf, ctx, appName, appNs)
    app, err := appIf.Get(ctx, &application.ApplicationQuery{Name: pointer.String(appName), AppNamespace: pointer.String(appNs)})
    errors.CheckError(err)
    mapNodeNameToResourceState := make(map[string]*resourceState)
    for _, res := range getResourceStates(app, nil) {
        mapNodeNameToResourceState[res.Kind+"/"+res.Name] = res
    }
    return mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState
}

const waitFormatString = "%s\t%5s\t%10s\t%10s\t%20s\t%8s\t%7s\t%10s\t%s\n"
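The lookup map built above is keyed by the "Kind/Name" string, and the tree printers later use the same key to find a row's sync state. A hypothetical test-style snippet (not in the repository; it assumes the commands package and its existing resourceState type) illustrating the convention:

func TestKindNameKeyExample(t *testing.T) {
    states := map[string]*resourceState{
        "Rollout/numalogic-rollout-demo": {Kind: "Rollout", Name: "numalogic-rollout-demo", Status: "Synced", Health: "Healthy"},
    }
    // The same key format used by resourceParentChild: Kind + "/" + Name.
    got := states["Rollout"+"/"+"numalogic-rollout-demo"]
    assert.NotNil(t, got)
    assert.Equal(t, "Healthy", got.Health)
}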
|
||||
// waitOnApplicationStatus watches an application and blocks until either the desired watch conditions
// are fulfilled or we reach the timeout. Returns the app once desired conditions have been fulfilled.
// Additionally return the operationState at time of fulfilment (which may be different than returned app).
func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client, appName string, timeout uint, watch watchOpts, selectedResources []*argoappv1.SyncOperationResource, output string) (*argoappv1.Application, *argoappv1.OperationState, error) {
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

@@ -2035,18 +2126,49 @@ func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client,
|
||||
printOperationResult(app.Status.OperationState)
|
||||
}
|
||||
|
||||
        switch output {
        case "yaml", "json":
            err := PrintResource(app, output)
            errors.CheckError(err)
        case "wide", "":
            if len(app.Status.Resources) > 0 {
                fmt.Println()
                w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
                printAppResources(w, app)
                _ = w.Flush()
            }
        case "tree":
            mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
            if len(mapUidToNode) > 0 {
                fmt.Println()
                printTreeView(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
            }
        case "tree=detailed":
            mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState := resourceParentChild(ctx, acdClient, appName, appNs)
            if len(mapUidToNode) > 0 {
                fmt.Println()
                printTreeViewDetailed(mapUidToNode, mapParentToChild, parentNode, mapNodeNameToResourceState)
            }
        default:
            errors.CheckError(fmt.Errorf("unknown output format: %s", output))
        }
        return app
    }

if timeout != 0 {
|
||||
time.AfterFunc(time.Duration(timeout)*time.Second, func() {
|
||||
_, appClient := acdClient.NewApplicationClientOrDie()
|
||||
app, err := appClient.Get(ctx, &application.ApplicationQuery{
|
||||
Name: &appRealName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
fmt.Println()
|
||||
fmt.Println("This is the state of the app after `wait` timed out:")
|
||||
printFinalStatus(app)
|
||||
cancel()
|
||||
fmt.Println()
|
||||
fmt.Println("The command timed out waiting for the conditions to be met.")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2254,13 +2376,13 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
|
||||
var (
|
||||
prune bool
|
||||
timeout uint
|
||||
output string
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: "rollback APPNAME [ID]",
|
||||
Short: "Rollback application to a previous deployed version by History ID, omitted will Rollback to the previous version",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) == 0 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -2294,12 +2416,13 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
|
||||
|
||||
_, _, err = waitOnApplicationStatus(ctx, acdClient, app.QualifiedName(), timeout, watchOpts{
|
||||
operation: true,
|
||||
}, nil)
|
||||
}, nil, output)
|
||||
errors.CheckError(err)
|
||||
},
|
||||
}
|
||||
command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|tree|tree=detailed")
|
||||
return command
|
||||
}
|
||||
|
||||
|
||||
@@ -1,13 +1,93 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
func TestPrintTreeViewAppResources(t *testing.T) {
|
||||
var nodes [3]v1alpha1.ResourceNode
|
||||
nodes[0].ResourceRef = v1alpha1.ResourceRef{Group: "", Version: "v1", Kind: "Pod", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5-6trpt", UID: "92c3a5fe-d13e-4ae2-b8ec-c10dd3543b28"}
|
||||
nodes[0].ParentRefs = []v1alpha1.ResourceRef{{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}}
|
||||
nodes[1].ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
nodes[1].ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}}
|
||||
nodes[2].ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}
|
||||
var nodeMapping = make(map[string]v1alpha1.ResourceNode)
|
||||
var mapParentToChild = make(map[string][]string)
|
||||
var parentNode = make(map[string]struct{})
|
||||
for _, node := range nodes {
|
||||
nodeMapping[node.UID] = node
|
||||
if len(node.ParentRefs) > 0 {
|
||||
_, ok := mapParentToChild[node.ParentRefs[0].UID]
|
||||
if !ok {
|
||||
var temp []string
|
||||
mapParentToChild[node.ParentRefs[0].UID] = temp
|
||||
}
|
||||
mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID)
|
||||
} else {
|
||||
parentNode[node.UID] = struct{}{}
|
||||
}
|
||||
}
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0)
|
||||
|
||||
printTreeViewAppResourcesNotOrphaned(nodeMapping, mapParentToChild, parentNode, false, false, w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "Rollout")
|
||||
assert.Contains(t, output, "argoproj.io")
|
||||
}
|
||||
|
||||
func TestPrintTreeViewDetailedAppResources(t *testing.T) {
|
||||
var nodes [3]v1alpha1.ResourceNode
|
||||
nodes[0].ResourceRef = v1alpha1.ResourceRef{Group: "", Version: "v1", Kind: "Pod", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5-6trpt", UID: "92c3a5fe-d13e-4ae2-b8ec-c10dd3543b28"}
|
||||
nodes[0].ParentRefs = []v1alpha1.ResourceRef{{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}}
|
||||
nodes[1].ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
nodes[1].ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}}
|
||||
nodes[2].ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}
|
||||
nodes[2].Health = &v1alpha1.HealthStatus{
|
||||
Status: "Degraded",
|
||||
Message: "Readiness Gate failed",
|
||||
}
|
||||
|
||||
var nodeMapping = make(map[string]v1alpha1.ResourceNode)
|
||||
var mapParentToChild = make(map[string][]string)
|
||||
var parentNode = make(map[string]struct{})
|
||||
for _, node := range nodes {
|
||||
nodeMapping[node.UID] = node
|
||||
if len(node.ParentRefs) > 0 {
|
||||
_, ok := mapParentToChild[node.ParentRefs[0].UID]
|
||||
if !ok {
|
||||
var temp []string
|
||||
mapParentToChild[node.ParentRefs[0].UID] = temp
|
||||
}
|
||||
mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID)
|
||||
} else {
|
||||
parentNode[node.UID] = struct{}{}
|
||||
}
|
||||
}
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0)
|
||||
|
||||
printDetailedTreeViewAppResourcesNotOrphaned(nodeMapping, mapParentToChild, parentNode, false, false, w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "Rollout")
|
||||
assert.Contains(t, output, "Degraded")
|
||||
assert.Contains(t, output, "Readiness Gate failed")
|
||||
}
|
||||
|
||||
func TestPrintResourcesTree(t *testing.T) {
|
||||
tree := v1alpha1.ApplicationTree{
|
||||
Nodes: []v1alpha1.ResourceNode{
|
||||
@@ -32,7 +112,7 @@ func TestPrintResourcesTree(t *testing.T) {
|
||||
},
|
||||
}
|
||||
output, _ := captureOutput(func() error {
|
||||
printResources(true, false, &tree)
|
||||
printResources(true, false, &tree, "")
|
||||
return nil
|
||||
})
|
||||
|
||||
|
||||
@@ -4,9 +4,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -149,34 +148,113 @@ func NewApplicationDeleteResourceCommand(clientOpts *argocdclient.ClientOptions)
|
||||
return command
|
||||
}
|
||||
|
||||
func parentChildInfo(nodes []v1alpha1.ResourceNode) (map[string]v1alpha1.ResourceNode, map[string][]string, map[string]struct{}) {
    mapUidToNode := make(map[string]v1alpha1.ResourceNode)
    mapParentToChild := make(map[string][]string)
    parentNode := make(map[string]struct{})

    for _, node := range nodes {
        mapUidToNode[node.UID] = node

        if len(node.ParentRefs) > 0 {
            _, ok := mapParentToChild[node.ParentRefs[0].UID]
            if !ok {
                var temp []string
                mapParentToChild[node.ParentRefs[0].UID] = temp
            }
            mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID)
        } else {
            parentNode[node.UID] = struct{}{}
        }
    }
    return mapUidToNode, mapParentToChild, parentNode
}
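A hypothetical test-style snippet (not in the repository, written as if it sat next to the existing tests in the commands package) showing what parentChildInfo produces for a two-node tree with illustrative UIDs:

func TestParentChildInfoExample(t *testing.T) {
    rollout := v1alpha1.ResourceNode{ResourceRef: v1alpha1.ResourceRef{Kind: "Rollout", Name: "demo", UID: "uid-rollout"}}
    rs := v1alpha1.ResourceNode{
        ResourceRef: v1alpha1.ResourceRef{Kind: "ReplicaSet", Name: "demo-abc", UID: "uid-rs"},
        ParentRefs:  []v1alpha1.ResourceRef{{Kind: "Rollout", Name: "demo", UID: "uid-rollout"}},
    }

    uidToNode, parentToChild, roots := parentChildInfo([]v1alpha1.ResourceNode{rollout, rs})

    // Every node is indexed by UID, the ReplicaSet hangs off the Rollout, and only the Rollout is a root.
    assert.Len(t, uidToNode, 2)
    assert.Equal(t, []string{"uid-rs"}, parentToChild["uid-rollout"])
    assert.Contains(t, roots, "uid-rollout")
}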
|
||||
func printDetailedTreeViewAppResourcesNotOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) {
    for uid := range parentNodes {
        detailedTreeViewAppResourcesNotOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w)
    }
}

func printDetailedTreeViewAppResourcesOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) {
    for uid := range parentNodes {
        detailedTreeViewAppResourcesOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w)
    }
}

func printTreeViewAppResourcesNotOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) {
    for uid := range parentNodes {
        treeViewAppResourcesNotOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w)
    }
}

func printTreeViewAppResourcesOrphaned(nodeMapping map[string]v1alpha1.ResourceNode, parentChildMapping map[string][]string, parentNodes map[string]struct{}, orphaned bool, listAll bool, w *tabwriter.Writer) {
    for uid := range parentNodes {
        treeViewAppResourcesOrphaned("", nodeMapping, parentChildMapping, nodeMapping[uid], w)
    }
}

func printResources(listAll bool, orphaned bool, appResourceTree *v1alpha1.ApplicationTree, output string) {
    w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
    if output == "tree=detailed" {
        fmt.Fprintf(w, "GROUP\tKIND\tNAMESPACE\tNAME\tORPHANED\tAGE\tHEALTH\tREASON\n")

        if !orphaned || listAll {
            mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.Nodes)
            printDetailedTreeViewAppResourcesNotOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w)
        }

        if orphaned || listAll {
            mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.OrphanedNodes)
            printDetailedTreeViewAppResourcesOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w)
        }
    } else if output == "tree" {
        fmt.Fprintf(w, "GROUP\tKIND\tNAMESPACE\tNAME\tORPHANED\n")

        if !orphaned || listAll {
            mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.Nodes)
            printTreeViewAppResourcesNotOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w)
        }

        if orphaned || listAll {
            mapUidToNode, mapParentToChild, parentNode := parentChildInfo(appResourceTree.OrphanedNodes)
            printTreeViewAppResourcesOrphaned(mapUidToNode, mapParentToChild, parentNode, orphaned, listAll, w)
        }
    } else {
        headers := []interface{}{"GROUP", "KIND", "NAMESPACE", "NAME", "ORPHANED"}
        fmtStr := "%s\t%s\t%s\t%s\t%s\n"
        _, _ = fmt.Fprintf(w, fmtStr, headers...)
        if !orphaned || listAll {
            for _, res := range appResourceTree.Nodes {
                if len(res.ParentRefs) == 0 {
                    _, _ = fmt.Fprintf(w, fmtStr, res.Group, res.Kind, res.Namespace, res.Name, "No")
                }
            }
        }
        if orphaned || listAll {
            for _, res := range appResourceTree.OrphanedNodes {
                _, _ = fmt.Fprintf(w, fmtStr, res.Group, res.Kind, res.Namespace, res.Name, "Yes")
            }
        }
    }
    _ = w.Flush()
}
|
||||
|
||||
func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var orphaned bool
|
||||
var output string
|
||||
var command = &cobra.Command{
|
||||
Use: "resources APPNAME",
|
||||
Short: "List resource of application",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
ctx := c.Context()
|
||||
|
||||
if len(args) != 1 {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -190,9 +268,10 @@ func NewApplicationListResourcesCommand(clientOpts *argocdclient.ClientOptions)
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
printResources(listAll, orphaned, appResourceTree)
|
||||
printResources(listAll, orphaned, appResourceTree, output)
|
||||
},
|
||||
}
|
||||
command.Flags().BoolVar(&orphaned, "orphaned", false, "Lists only orphaned resources")
|
||||
command.Flags().StringVar(&output, "output", "", "Provides the tree view of the resources")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -115,6 +115,86 @@ func TestFindRevisionHistoryWithoutPassedId(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestPrintTreeViewAppGet(t *testing.T) {
|
||||
var nodes [3]v1alpha1.ResourceNode
|
||||
nodes[0].ResourceRef = v1alpha1.ResourceRef{Group: "", Version: "v1", Kind: "Pod", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5-6trpt", UID: "92c3a5fe-d13e-4ae2-b8ec-c10dd3543b28"}
|
||||
nodes[0].ParentRefs = []v1alpha1.ResourceRef{{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}}
|
||||
nodes[1].ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
nodes[1].ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}}
|
||||
nodes[2].ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}
|
||||
|
||||
var nodeMapping = make(map[string]v1alpha1.ResourceNode)
|
||||
var mapParentToChild = make(map[string][]string)
|
||||
var parentNode = make(map[string]struct{})
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeMapping[node.UID] = node
|
||||
|
||||
if len(node.ParentRefs) > 0 {
|
||||
_, ok := mapParentToChild[node.ParentRefs[0].UID]
|
||||
if !ok {
|
||||
var temp []string
|
||||
mapParentToChild[node.ParentRefs[0].UID] = temp
|
||||
}
|
||||
mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID)
|
||||
} else {
|
||||
parentNode[node.UID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
output, _ := captureOutput(func() error {
|
||||
printTreeView(nodeMapping, mapParentToChild, parentNode, nil)
|
||||
return nil
|
||||
})
|
||||
|
||||
assert.Contains(t, output, "Pod")
|
||||
assert.Contains(t, output, "ReplicaSet")
|
||||
assert.Contains(t, output, "Rollout")
|
||||
assert.Contains(t, output, "numalogic-rollout-demo-5dcd5457d5-6trpt")
|
||||
}
|
||||
|
||||
func TestPrintTreeViewDetailedAppGet(t *testing.T) {
|
||||
var nodes [3]v1alpha1.ResourceNode
|
||||
nodes[0].ResourceRef = v1alpha1.ResourceRef{Group: "", Version: "v1", Kind: "Pod", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5-6trpt", UID: "92c3a5fe-d13e-4ae2-b8ec-c10dd3543b28"}
|
||||
nodes[0].Health = &v1alpha1.HealthStatus{Status: "Degraded", Message: "Readiness Gate failed"}
|
||||
nodes[0].ParentRefs = []v1alpha1.ResourceRef{{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}}
|
||||
nodes[1].ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
nodes[1].ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}}
|
||||
nodes[2].ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}
|
||||
|
||||
var nodeMapping = make(map[string]v1alpha1.ResourceNode)
|
||||
var mapParentToChild = make(map[string][]string)
|
||||
var parentNode = make(map[string]struct{})
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeMapping[node.UID] = node
|
||||
|
||||
if len(node.ParentRefs) > 0 {
|
||||
_, ok := mapParentToChild[node.ParentRefs[0].UID]
|
||||
if !ok {
|
||||
var temp []string
|
||||
mapParentToChild[node.ParentRefs[0].UID] = temp
|
||||
}
|
||||
mapParentToChild[node.ParentRefs[0].UID] = append(mapParentToChild[node.ParentRefs[0].UID], node.UID)
|
||||
} else {
|
||||
parentNode[node.UID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
output, _ := captureOutput(func() error {
|
||||
printTreeViewDetailed(nodeMapping, mapParentToChild, parentNode, nil)
|
||||
return nil
|
||||
})
|
||||
|
||||
assert.Contains(t, output, "Pod")
|
||||
assert.Contains(t, output, "ReplicaSet")
|
||||
assert.Contains(t, output, "Rollout")
|
||||
assert.Contains(t, output, "numalogic-rollout-demo-5dcd5457d5-6trpt")
|
||||
assert.Contains(t, output, "Degraded")
|
||||
assert.Contains(t, output, "Readiness Gate failed")
|
||||
|
||||
}
|
||||
|
||||
func TestDefaultWaitOptions(t *testing.T) {
|
||||
watch := watchOpts{
|
||||
sync: false,
|
||||
@@ -307,8 +387,8 @@ func Test_groupObjsByKey(t *testing.T) {
|
||||
}
|
||||
|
||||
expected := map[kube.ResourceKey]*unstructured.Unstructured{
|
||||
kube.ResourceKey{Group: "", Kind: "Pod", Namespace: "default", Name: "pod-name"}: localObjs[0],
|
||||
kube.ResourceKey{Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition", Namespace: "", Name: "certificates.cert-manager.io"}: localObjs[1],
|
||||
{Group: "", Kind: "Pod", Namespace: "default", Name: "pod-name"}: localObjs[0],
|
||||
{Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition", Namespace: "", Name: "certificates.cert-manager.io"}: localObjs[1],
|
||||
}
|
||||
|
||||
objByKey := groupObjsByKey(localObjs, liveObjs, "default")
|
||||
|
||||
@@ -147,7 +147,7 @@ func NewApplicationSetCreateCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
defer argoio.Close(conn)
|
||||
|
||||
// Get the ApplicationSet before creating it, to see whether it is being updated or there is no change
|
||||
existing, err := appIf.Get(ctx, &applicationset.ApplicationSetGetQuery{Name: appset.Name})
|
||||
existing, err := appIf.Get(ctx, &applicationset.ApplicationSetGetQuery{Name: appset.Name, AppsetNamespace: appset.Namespace})
|
||||
if grpc.UnwrapGRPCStatus(err).Code() != codes.NotFound {
|
||||
errors.CheckError(err)
|
||||
}
|
||||
|
||||
@@ -40,12 +40,12 @@ func TestPrintApplicationSetTable(t *testing.T) {
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Git: &v1alpha1.GitGenerator{
|
||||
RepoURL: "https://github.com/argoproj/argo-cd.git",
|
||||
Revision: "head",
|
||||
Directories: []v1alpha1.GitDirectoryGeneratorItem{
|
||||
v1alpha1.GitDirectoryGeneratorItem{
|
||||
{
|
||||
Path: "applicationset/examples/git-generator-directory/cluster-addons/*",
|
||||
},
|
||||
},
|
||||
@@ -60,7 +60,7 @@ func TestPrintApplicationSetTable(t *testing.T) {
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Conditions: []v1alpha1.ApplicationSetCondition{
|
||||
v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
},
|
||||
@@ -75,12 +75,12 @@ func TestPrintApplicationSetTable(t *testing.T) {
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Git: &v1alpha1.GitGenerator{
|
||||
RepoURL: "https://github.com/argoproj/argo-cd.git",
|
||||
Revision: "head",
|
||||
Directories: []v1alpha1.GitDirectoryGeneratorItem{
|
||||
v1alpha1.GitDirectoryGeneratorItem{
|
||||
{
|
||||
Path: "applicationset/examples/git-generator-directory/cluster-addons/*",
|
||||
},
|
||||
},
|
||||
@@ -95,7 +95,7 @@ func TestPrintApplicationSetTable(t *testing.T) {
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Conditions: []v1alpha1.ApplicationSetCondition{
|
||||
v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
},
|
||||
@@ -118,12 +118,12 @@ func TestPrintAppSetSummaryTable(t *testing.T) {
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSetSpec{
|
||||
Generators: []v1alpha1.ApplicationSetGenerator{
|
||||
v1alpha1.ApplicationSetGenerator{
|
||||
{
|
||||
Git: &v1alpha1.GitGenerator{
|
||||
RepoURL: "https://github.com/argoproj/argo-cd.git",
|
||||
Revision: "head",
|
||||
Directories: []v1alpha1.GitDirectoryGeneratorItem{
|
||||
v1alpha1.GitDirectoryGeneratorItem{
|
||||
{
|
||||
Path: "applicationset/examples/git-generator-directory/cluster-addons/*",
|
||||
},
|
||||
},
|
||||
@@ -138,7 +138,7 @@ func TestPrintAppSetSummaryTable(t *testing.T) {
|
||||
},
|
||||
Status: v1alpha1.ApplicationSetStatus{
|
||||
Conditions: []v1alpha1.ApplicationSetCondition{
|
||||
v1alpha1.ApplicationSetCondition{
|
||||
{
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
Type: v1alpha1.ApplicationSetConditionResourcesUpToDate,
|
||||
},
|
||||
|
||||
@@ -130,7 +130,7 @@ func NewCertAddTLSCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
}
|
||||
},
|
||||
}
|
||||
command.Flags().StringVar(&fromFile, "from", "", "read TLS certificate data from file (default is to read from stdin)")
|
||||
command.Flags().StringVar(&fromFile, "from", "", "Read TLS certificate data from file (default is to read from stdin)")
|
||||
command.Flags().BoolVar(&upsert, "upsert", false, "Replace existing TLS certificate if certificate is different in input")
|
||||
return command
|
||||
}
|
||||
@@ -300,9 +300,9 @@ func NewCertListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
}
|
||||
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide")
|
||||
command.Flags().StringVar(&sortOrder, "sort", "", "set display sort order for output format wide. One of: hostname|type")
|
||||
command.Flags().StringVar(&certType, "cert-type", "", "only list certificates of given type, valid: 'ssh','https'")
|
||||
command.Flags().StringVar(&hostNamePattern, "hostname-pattern", "", "only list certificates for hosts matching given glob-pattern")
|
||||
command.Flags().StringVar(&sortOrder, "sort", "", "Set display sort order for output format wide. One of: hostname|type")
|
||||
command.Flags().StringVar(&certType, "cert-type", "", "Only list certificates of given type, valid: 'ssh','https'")
|
||||
command.Flags().StringVar(&hostNamePattern, "hostname-pattern", "", "Only list certificates for hosts matching given glob-pattern")
|
||||
return command
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
@@ -38,12 +39,14 @@ import (
|
||||
)
|
||||
|
||||
type forwardCacheClient struct {
|
||||
namespace string
|
||||
context string
|
||||
init sync.Once
|
||||
client cache.CacheClient
|
||||
compression cache.RedisCompressionType
|
||||
err error
|
||||
namespace string
|
||||
context string
|
||||
init sync.Once
|
||||
client cache.CacheClient
|
||||
compression cache.RedisCompressionType
|
||||
err error
|
||||
redisHaProxyName string
|
||||
redisName string
|
||||
}
|
||||
|
||||
func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error) error {
|
||||
@@ -51,8 +54,10 @@ func (c *forwardCacheClient) doLazy(action func(client cache.CacheClient) error)
|
||||
overrides := clientcmd.ConfigOverrides{
|
||||
CurrentContext: c.context,
|
||||
}
|
||||
redisHaProxyPodLabelSelector := common.LabelKeyAppName + "=" + c.redisHaProxyName
|
||||
redisPodLabelSelector := common.LabelKeyAppName + "=" + c.redisName
|
||||
redisPort, err := kubeutil.PortForward(6379, c.namespace, &overrides,
|
||||
"app.kubernetes.io/name=argocd-redis-ha-haproxy", "app.kubernetes.io/name=argocd-redis")
|
||||
redisHaProxyPodLabelSelector, redisPodLabelSelector)
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return
|
||||
@@ -98,11 +103,12 @@ func (c *forwardCacheClient) NotifyUpdated(key string) error {
|
||||
}
|
||||
|
||||
type forwardRepoClientset struct {
|
||||
namespace string
|
||||
context string
|
||||
init sync.Once
|
||||
repoClientset repoapiclient.Clientset
|
||||
err error
|
||||
namespace string
|
||||
context string
|
||||
init sync.Once
|
||||
repoClientset repoapiclient.Clientset
|
||||
err error
|
||||
repoServerName string
|
||||
}
|
||||
|
||||
func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.RepoServerServiceClient, error) {
|
||||
@@ -110,7 +116,8 @@ func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.R
|
||||
overrides := clientcmd.ConfigOverrides{
|
||||
CurrentContext: c.context,
|
||||
}
|
||||
repoServerPort, err := kubeutil.PortForward(8081, c.namespace, &overrides, "app.kubernetes.io/name=argocd-repo-server")
|
||||
repoServerPodLabelSelector := common.LabelKeyAppName + "=" + c.repoServerName
|
||||
repoServerPort, err := kubeutil.PortForward(8081, c.namespace, &overrides, repoServerPodLabelSelector)
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return
|
||||
@@ -127,15 +134,18 @@ func (c *forwardRepoClientset) NewRepoServerClient() (io.Closer, repoapiclient.R
|
||||
func testAPI(ctx context.Context, clientOpts *apiclient.ClientOptions) error {
|
||||
apiClient, err := apiclient.NewClient(clientOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to create API client: %w", err)
|
||||
}
|
||||
closer, versionClient, err := apiClient.NewVersionClient()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to create version client: %w", err)
|
||||
}
|
||||
defer io.Close(closer)
|
||||
_, err = versionClient.Version(ctx, &empty.Empty{})
|
||||
return err
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get version: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
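The testAPI changes above wrap each failure with fmt.Errorf and the %w verb, so callers get context without losing the underlying error. A minimal, self-contained sketch (names are illustrative, not from the repository) of why %w matters:

package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("not found")

func load() error {
    // Wrapping with %w keeps errNotFound reachable through the error chain.
    return fmt.Errorf("failed to load config: %w", errNotFound)
}

func main() {
    err := load()
    fmt.Println(err)                         // failed to load config: not found
    fmt.Println(errors.Is(err, errNotFound)) // true
}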
|
||||
// StartLocalServer allows executing command in a headless mode: on the fly starts Argo CD API server and
|
||||
@@ -147,12 +157,12 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
if !startInProcessAPI {
|
||||
localCfg, err := localconfig.ReadLocalConfig(clientOpts.ConfigPath)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error reading local config: %w", err)
|
||||
}
|
||||
if localCfg != nil {
|
||||
configCtx, err := localCfg.ResolveContext(clientOpts.Context)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error resolving context: %w", err)
|
||||
}
|
||||
startInProcessAPI = configCtx.Server.Core
|
||||
}
|
||||
@@ -173,7 +183,7 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
addr := fmt.Sprintf("%s:0", *address)
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to listen on %q: %w", addr, err)
|
||||
}
|
||||
port = &ln.Addr().(*net.TCPAddr).Port
|
||||
io.Close(ln)
|
||||
@@ -181,27 +191,27 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
|
||||
restConfig, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error creating client config: %w", err)
|
||||
}
|
||||
appClientset, err := appclientset.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error creating app clientset: %w", err)
|
||||
}
|
||||
kubeClientset, err := kubernetes.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error creating kubernetes clientset: %w", err)
|
||||
}
|
||||
|
||||
namespace, _, err := clientConfig.Namespace()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error getting namespace: %w", err)
|
||||
}
|
||||
|
||||
mr, err := miniredis.Run()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error running miniredis: %w", err)
|
||||
}
|
||||
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression}), time.Hour)
|
||||
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr, compression: compression, redisHaProxyName: clientOpts.RedisHaProxyName, redisName: clientOpts.RedisName}), time.Hour)
|
||||
srv := server.NewServer(ctx, server.ArgoCDServerOpts{
|
||||
EnableGZip: false,
|
||||
Namespace: namespace,
|
||||
@@ -213,14 +223,14 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
KubeClientset: kubeClientset,
|
||||
Insecure: true,
|
||||
ListenHost: *address,
|
||||
RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr},
|
||||
RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr, repoServerName: clientOpts.RepoServerName},
|
||||
EnableProxyExtension: false,
|
||||
})
|
||||
srv.Init(ctx)
|
||||
|
||||
lns, err := srv.Listen()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to listen: %w", err)
|
||||
}
|
||||
go srv.Run(ctx, lns)
|
||||
clientOpts.ServerAddr = fmt.Sprintf("%s:%d", *address, *port)
|
||||
@@ -236,7 +246,10 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return err
|
||||
if err != nil {
|
||||
return fmt.Errorf("all retries failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewClientOrDie creates a new API client from a set of config options, or fails fatally if the new client creation fails.
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/admin"
|
||||
"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/initialize"
|
||||
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/config"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
"github.com/argoproj/argo-cd/v2/util/localconfig"
|
||||
)
|
||||
@@ -55,7 +59,7 @@ func NewCommand() *cobra.Command {
|
||||
command.AddCommand(NewLogoutCommand(&clientOpts))
|
||||
command.AddCommand(initialize.InitCommand(NewCertCommand(&clientOpts)))
|
||||
command.AddCommand(initialize.InitCommand(NewGPGCommand(&clientOpts)))
|
||||
command.AddCommand(admin.NewAdminCommand())
|
||||
command.AddCommand(admin.NewAdminCommand(&clientOpts))
|
||||
|
||||
defaultLocalConfigPath, err := localconfig.DefaultLocalConfigPath()
|
||||
errors.CheckError(err)
|
||||
@@ -76,6 +80,11 @@ func NewCommand() *cobra.Command {
|
||||
command.PersistentFlags().StringVar(&clientOpts.PortForwardNamespace, "port-forward-namespace", config.GetFlag("port-forward-namespace", ""), "Namespace name which should be used for port forwarding")
|
||||
command.PersistentFlags().IntVar(&clientOpts.HttpRetryMax, "http-retry-max", 0, "Maximum number of retries to establish http connection to Argo CD server")
|
||||
command.PersistentFlags().BoolVar(&clientOpts.Core, "core", false, "If set to true then CLI talks directly to Kubernetes instead of talking to Argo CD API server")
|
||||
command.PersistentFlags().StringVar(&clientOpts.ServerName, "server-name", env.StringFromEnv(common.EnvServerName, common.DefaultServerName), fmt.Sprintf("Name of the Argo CD API server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvServerName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.AppControllerName, "controller-name", env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName), fmt.Sprintf("Name of the Argo CD Application controller; set this or the %s environment variable when the controller's name label differs from the default, for example when installing via the Helm chart", common.EnvAppControllerName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.RedisHaProxyName, "redis-haproxy-name", env.StringFromEnv(common.EnvRedisHaProxyName, common.DefaultRedisHaProxyName), fmt.Sprintf("Name of the Redis HA Proxy; set this or the %s environment variable when the HA Proxy's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisHaProxyName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.RedisName, "redis-name", env.StringFromEnv(common.EnvRedisName, common.DefaultRedisName), fmt.Sprintf("Name of the Redis deployment; set this or the %s environment variable when the Redis's name label differs from the default, for example when installing via the Helm chart", common.EnvRedisName))
|
||||
command.PersistentFlags().StringVar(&clientOpts.RepoServerName, "repo-server-name", env.StringFromEnv(common.EnvRepoServerName, common.DefaultRepoServerName), fmt.Sprintf("Name of the Argo CD Repo server; set this or the %s environment variable when the server's name label differs from the default, for example when installing via the Helm chart", common.EnvRepoServerName))
|
||||
|
||||
clientOpts.KubeOverrides = &clientcmd.ConfigOverrides{}
|
||||
command.PersistentFlags().StringVar(&clientOpts.KubeOverrides.CurrentContext, "kube-context", "", "Directs the command to the given kube-context")
|
||||
|
||||
168
cmd/argocd/commands/tree.go
Normal file
@@ -0,0 +1,168 @@
|
||||
package commands

import (
    "fmt"
    "strings"
    "text/tabwriter"
    "time"

    "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
    "github.com/argoproj/gitops-engine/pkg/health"
    "k8s.io/apimachinery/pkg/util/duration"
)

const (
    firstElemPrefix = `├─`
    lastElemPrefix  = `└─`
    indent          = "  "
    pipe            = `│ `
)

func extractHealthStatusAndReason(node v1alpha1.ResourceNode) (healthStatus health.HealthStatusCode, reason string) {
    if node.Health != nil {
        healthStatus = node.Health.Status
        reason = node.Health.Message
    }
    return
}

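A hypothetical test-style snippet (not in the repository, assuming it sits in the same commands package as the tests above) showing the two cases extractHealthStatusAndReason distinguishes:

func TestExtractHealthStatusAndReasonExample(t *testing.T) {
    // A node without health info yields zero values.
    var bare v1alpha1.ResourceNode
    status, reason := extractHealthStatusAndReason(bare)
    assert.Empty(t, status)
    assert.Empty(t, reason)

    // A degraded node surfaces both the status and the message.
    degraded := v1alpha1.ResourceNode{Health: &v1alpha1.HealthStatus{Status: "Degraded", Message: "Readiness Gate failed"}}
    status, reason = extractHealthStatusAndReason(degraded)
    assert.Equal(t, "Degraded", string(status))
    assert.Equal(t, "Readiness Gate failed", reason)
}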
|
||||
func treeViewAppGet(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentToChildMap map[string][]string, parent v1alpha1.ResourceNode, mapNodeNameToResourceState map[string]*resourceState, w *tabwriter.Writer) {
    healthStatus, _ := extractHealthStatusAndReason(parent)
    if mapNodeNameToResourceState[parent.Kind+"/"+parent.Name] != nil {
        value := mapNodeNameToResourceState[parent.Kind+"/"+parent.Name]
        _, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+value.Name, value.Status, value.Health, value.Message)
    } else {
        _, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+parent.Name, "", healthStatus, "")
    }
    chs := parentToChildMap[parent.UID]
    for i, childUid := range chs {
        var p string
        switch i {
        case len(chs) - 1:
            p = prefix + lastElemPrefix
        default:
            p = prefix + firstElemPrefix
        }
        treeViewAppGet(p, uidToNodeMap, parentToChildMap, uidToNodeMap[childUid], mapNodeNameToResourceState, w)
    }
}

|
||||
func detailedTreeViewAppGet(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, mapNodeNameToResourceState map[string]*resourceState, w *tabwriter.Writer) {
|
||||
healthStatus, reason := extractHealthStatusAndReason(parent)
|
||||
var age = "<unknown>"
|
||||
if parent.CreatedAt != nil {
|
||||
age = duration.HumanDuration(time.Since(parent.CreatedAt.Time))
|
||||
}
|
||||
|
||||
if mapNodeNameToResourceState[parent.Kind+"/"+parent.Name] != nil {
|
||||
value := mapNodeNameToResourceState[parent.Kind+"/"+parent.Name]
|
||||
_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+value.Name, value.Status, value.Health, age, value.Message, reason)
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\t%s\t%s\n", printPrefix(prefix), parent.Kind+"/"+parent.Name, "", healthStatus, age, "", reason)
|
||||
|
||||
}
|
||||
chs := parentChildMap[parent.UID]
|
||||
for i, child := range chs {
|
||||
var p string
|
||||
switch i {
|
||||
case len(chs) - 1:
|
||||
p = prefix + lastElemPrefix
|
||||
default:
|
||||
p = prefix + firstElemPrefix
|
||||
}
|
||||
detailedTreeViewAppGet(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], mapNodeNameToResourceState, w)
|
||||
}
|
||||
}
|
||||
|
||||
func treeViewAppResourcesNotOrphaned(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, w *tabwriter.Writer) {
|
||||
if len(parent.ParentRefs) == 0 {
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", parent.Group, parent.Kind, parent.Namespace, parent.Name, "No")
|
||||
}
|
||||
chs := parentChildMap[parent.UID]
|
||||
for i, child := range chs {
|
||||
var p string
|
||||
switch i {
|
||||
case len(chs) - 1:
|
||||
p = prefix + lastElemPrefix
|
||||
default:
|
||||
p = prefix + firstElemPrefix
|
||||
}
|
||||
treeViewAppResourcesNotOrphaned(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], w)
|
||||
}
|
||||
}
|
||||
|
||||
func treeViewAppResourcesOrphaned(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, w *tabwriter.Writer) {
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", parent.Group, parent.Kind, parent.Namespace, parent.Name, "Yes")
|
||||
chs := parentChildMap[parent.UID]
|
||||
for i, child := range chs {
|
||||
var p string
|
||||
switch i {
|
||||
case len(chs) - 1:
|
||||
p = prefix + lastElemPrefix
|
||||
default:
|
||||
p = prefix + firstElemPrefix
|
||||
}
|
||||
treeViewAppResourcesOrphaned(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], w)
|
||||
}
|
||||
}
|
||||
|
||||
func detailedTreeViewAppResourcesNotOrphaned(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, w *tabwriter.Writer) {
|
||||
|
||||
if len(parent.ParentRefs) == 0 {
|
||||
healthStatus, reason := extractHealthStatusAndReason(parent)
|
||||
var age = "<unknown>"
|
||||
if parent.CreatedAt != nil {
|
||||
age = duration.HumanDuration(time.Since(parent.CreatedAt.Time))
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", parent.Group, parent.Kind, parent.Namespace, parent.Name, "No", age, healthStatus, reason)
|
||||
}
|
||||
chs := parentChildMap[parent.UID]
|
||||
for i, child := range chs {
|
||||
var p string
|
||||
switch i {
|
||||
case len(chs) - 1:
|
||||
p = prefix + lastElemPrefix
|
||||
default:
|
||||
p = prefix + firstElemPrefix
|
||||
}
|
||||
detailedTreeViewAppResourcesNotOrphaned(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], w)
|
||||
}
|
||||
}
|
||||
|
||||
func detailedTreeViewAppResourcesOrphaned(prefix string, uidToNodeMap map[string]v1alpha1.ResourceNode, parentChildMap map[string][]string, parent v1alpha1.ResourceNode, w *tabwriter.Writer) {
|
||||
healthStatus, reason := extractHealthStatusAndReason(parent)
|
||||
var age = "<unknown>"
|
||||
if parent.CreatedAt != nil {
|
||||
age = duration.HumanDuration(time.Since(parent.CreatedAt.Time))
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", parent.Group, parent.Kind, parent.Namespace, parent.Name, "Yes", age, healthStatus, reason)
|
||||
|
||||
chs := parentChildMap[parent.UID]
|
||||
for i, child := range chs {
|
||||
var p string
|
||||
switch i {
|
||||
case len(chs) - 1:
|
||||
p = prefix + lastElemPrefix
|
||||
default:
|
||||
p = prefix + firstElemPrefix
|
||||
}
|
||||
detailedTreeViewAppResourcesOrphaned(p, uidToNodeMap, parentChildMap, uidToNodeMap[child], w)
|
||||
}
|
||||
}
|
||||
|
||||
func printPrefix(p string) string {
    if strings.HasSuffix(p, firstElemPrefix) {
        p = strings.Replace(p, firstElemPrefix, pipe, strings.Count(p, firstElemPrefix)-1)
    } else {
        p = strings.ReplaceAll(p, firstElemPrefix, pipe)
    }

    if strings.HasSuffix(p, lastElemPrefix) {
        p = strings.Replace(p, lastElemPrefix, strings.Repeat(" ", len([]rune(lastElemPrefix))), strings.Count(p, lastElemPrefix)-1)
    } else {
        p = strings.ReplaceAll(p, lastElemPrefix, strings.Repeat(" ", len([]rune(lastElemPrefix))))
    }
    return p
}

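For orientation, a sketch of the kind of layout these prefixes produce in the tree view; the resource names are taken from the tests below and the exact column spacing is illustrative:

Rollout/numalogic-rollout-demo                     Synced  Healthy
└─ReplicaSet/numalogic-rollout-demo-5dcd5457d5     Synced  Healthy
  └─Pod/numalogic-rollout-demo-5dcd5457d5-6trpt    Synced  Healthy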
216
cmd/argocd/commands/tree_test.go
Normal file
@@ -0,0 +1,216 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestTreeViewAppGet(t *testing.T) {
|
||||
var parent v1alpha1.ResourceNode
|
||||
parent.ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}
|
||||
objs := make(map[string]v1alpha1.ResourceNode)
|
||||
objs["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = parent
|
||||
var child v1alpha1.ResourceNode
|
||||
child.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
child.ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}}
|
||||
|
||||
objs["75c30dce-1b66-414f-a86c-573a74be0f40"] = child
|
||||
|
||||
childMapping := make(map[string][]string)
|
||||
childMapping["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = []string{"75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
|
||||
stateMap := make(map[string]*resourceState)
|
||||
stateMap["Rollout/numalogic-rollout-demo"] = &resourceState{
|
||||
Status: "Running",
|
||||
Health: "Healthy",
|
||||
Hook: "",
|
||||
Message: "No Issues",
|
||||
Name: "sandbox-rollout-numalogic-demo",
|
||||
Kind: "Rollout",
|
||||
Group: "argoproj.io",
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0)
|
||||
treeViewAppGet("", objs, childMapping, parent, stateMap, w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "ReplicaSet")
|
||||
assert.Contains(t, output, "Rollout")
|
||||
assert.Contains(t, output, "numalogic-rollout")
|
||||
assert.Contains(t, output, "Healthy")
|
||||
assert.Contains(t, output, "No Issues")
|
||||
}
|
||||
|
||||
func TestTreeViewDetailedAppGet(t *testing.T) {
|
||||
var parent v1alpha1.ResourceNode
|
||||
parent.ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}
|
||||
objs := make(map[string]v1alpha1.ResourceNode)
|
||||
objs["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = parent
|
||||
var child v1alpha1.ResourceNode
|
||||
child.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
child.ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}}
|
||||
child.Health = &v1alpha1.HealthStatus{Status: "Degraded", Message: "Readiness Gate failed"}
|
||||
objs["75c30dce-1b66-414f-a86c-573a74be0f40"] = child
|
||||
|
||||
childMapping := make(map[string][]string)
|
||||
childMapping["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = []string{"75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
|
||||
stateMap := make(map[string]*resourceState)
|
||||
stateMap["Rollout/numalogic-rollout-demo"] = &resourceState{
|
||||
Status: "Running",
|
||||
Health: "Healthy",
|
||||
Hook: "",
|
||||
Message: "No Issues",
|
||||
Name: "sandbox-rollout-numalogic-demo",
|
||||
Kind: "Rollout",
|
||||
Group: "argoproj.io",
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0)
|
||||
detailedTreeViewAppGet("", objs, childMapping, parent, stateMap, w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "ReplicaSet")
|
||||
assert.Contains(t, output, "Rollout")
|
||||
assert.Contains(t, output, "numalogic-rollout")
|
||||
assert.Contains(t, output, "Healthy")
|
||||
assert.Contains(t, output, "No Issues")
|
||||
assert.Contains(t, output, "Degraded")
|
||||
assert.Contains(t, output, "Readiness Gate failed")
|
||||
}
|
||||
|
||||
func TestTreeViewAppResources(t *testing.T) {
|
||||
var parent v1alpha1.ResourceNode
|
||||
parent.ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}
|
||||
objs := make(map[string]v1alpha1.ResourceNode)
|
||||
objs["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = parent
|
||||
var child v1alpha1.ResourceNode
|
||||
child.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
child.ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}}
|
||||
|
||||
objs["75c30dce-1b66-414f-a86c-573a74be0f40"] = child
|
||||
|
||||
childMapping := make(map[string][]string)
|
||||
childMapping["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = []string{"75c30dce-1b66-414f-a86c-573a74be0f40"}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0)
|
||||
|
||||
treeViewAppResourcesNotOrphaned("", objs, childMapping, parent, w)
|
||||
|
||||
var orphan v1alpha1.ResourceNode
|
||||
orphan.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcdnk457d5", UID: "75c30dce-1b66-41hf-a86c-573a74be0f40"}
|
||||
objsOrphan := make(map[string]v1alpha1.ResourceNode)
|
||||
objsOrphan["75c30dce-1b66-41hf-a86c-573a74be0f40"] = orphan
|
||||
orphanchildMapping := make(map[string][]string)
|
||||
orphanParent := orphan
|
||||
|
||||
treeViewAppResourcesOrphaned("", objsOrphan, orphanchildMapping, orphanParent, w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "ReplicaSet")
|
||||
assert.Contains(t, output, "Rollout")
|
||||
assert.Contains(t, output, "numalogic-rollout")
|
||||
assert.Contains(t, output, "argoproj.io")
|
||||
assert.Contains(t, output, "No")
|
||||
assert.Contains(t, output, "Yes")
|
||||
assert.Contains(t, output, "numalogic-rollout-demo-5dcdnk457d5")
|
||||
}
|
||||
|
||||
func TestTreeViewDetailedAppResources(t *testing.T) {
	var parent v1alpha1.ResourceNode
	parent.ResourceRef = v1alpha1.ResourceRef{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}
	objs := make(map[string]v1alpha1.ResourceNode)
	objs["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = parent
	var child v1alpha1.ResourceNode
	child.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcd5457d5", UID: "75c30dce-1b66-414f-a86c-573a74be0f40"}
	child.ParentRefs = []v1alpha1.ResourceRef{{Group: "argoproj.io", Version: "", Kind: "Rollout", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo", UID: "87f3aab0-f634-4b2c-959a-7ddd30675ed0"}}
	objs["75c30dce-1b66-414f-a86c-573a74be0f40"] = child
	childMapping := make(map[string][]string)
	childMapping["87f3aab0-f634-4b2c-959a-7ddd30675ed0"] = []string{"75c30dce-1b66-414f-a86c-573a74be0f40"}
	buf := &bytes.Buffer{}
	w := tabwriter.NewWriter(buf, 0, 0, 2, ' ', 0)
	detailedTreeViewAppResourcesNotOrphaned("", objs, childMapping, parent, w)
	var orphan v1alpha1.ResourceNode
	orphan.ResourceRef = v1alpha1.ResourceRef{Group: "apps", Version: "v1", Kind: "ReplicaSet", Namespace: "sandbox-rollout-numalogic-demo", Name: "numalogic-rollout-demo-5dcdnk457d5", UID: "75c30dce-1b66-41hf-a86c-573a74be0f40"}
	orphan.Health = &v1alpha1.HealthStatus{
		Status:  "Degraded",
		Message: "Readiness Gate failed",
	}
	objsOrphan := make(map[string]v1alpha1.ResourceNode)
	objsOrphan["75c30dce-1b66-41hf-a86c-573a74be0f40"] = orphan

	orphanchildMapping := make(map[string][]string)
	orphanParent := orphan
	detailedTreeViewAppResourcesOrphaned("", objsOrphan, orphanchildMapping, orphanParent, w)
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	output := buf.String()

	assert.Contains(t, output, "ReplicaSet")
	assert.Contains(t, output, "Rollout")
	assert.Contains(t, output, "numalogic-rollout")
	assert.Contains(t, output, "argoproj.io")
	assert.Contains(t, output, "No")
	assert.Contains(t, output, "Yes")
	assert.Contains(t, output, "numalogic-rollout-demo-5dcdnk457d5")
	assert.Contains(t, output, "Degraded")
	assert.Contains(t, output, "Readiness Gate failed")
}

func TestPrintPrefix(t *testing.T) {
	tests := []struct {
		input    string
		expected string
		name     string
	}{
		{
			input:    "",
			expected: "",
			name:     "empty string",
		},
		{
			input:    firstElemPrefix,
			expected: firstElemPrefix,
			name:     "only first element prefix",
		},
		{
			input:    lastElemPrefix,
			expected: lastElemPrefix,
			name:     "only last element prefix",
		},
		{
			input:    firstElemPrefix + firstElemPrefix,
			expected: pipe + firstElemPrefix,
			name:     "double first element prefix",
		},
		{
			input:    firstElemPrefix + lastElemPrefix,
			expected: pipe + lastElemPrefix,
			name:     "first then last element prefix",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got := printPrefix(test.input)
			assert.Equal(t, test.expected, got)
		})
	}
}

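The table above pins down how nested tree connectors are collapsed: every connector except the trailing one is demoted to a plain pipe. A minimal sketch of that behaviour, assuming the tree-drawing glyphs below (the real `pipe`, `firstElemPrefix` and `lastElemPrefix` constants live elsewhere in the CLI package and may differ):

import "strings"

// Assumed glyphs, for illustration only.
const (
	pipe            = "│ "
	firstElemPrefix = "├─ "
	lastElemPrefix  = "└─ "
)

// printPrefixSketch demotes every non-trailing connector to a pipe, which is
// exactly what the "double first element" and "first then last" cases expect.
func printPrefixSketch(p string) string {
	if strings.HasSuffix(p, firstElemPrefix) {
		// keep the final connector, replace the earlier ones
		return strings.Replace(p, firstElemPrefix, pipe, strings.Count(p, firstElemPrefix)-1)
	}
	return strings.ReplaceAll(p, firstElemPrefix, pipe)
}

Running the five table cases through this sketch reproduces the expected values, e.g. firstElemPrefix+lastElemPrefix becomes pipe+lastElemPrefix.
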
@@ -7,7 +7,6 @@ import (
	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	log "github.com/sirupsen/logrus"
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

@@ -47,8 +46,8 @@ func NewConnection(address string) (*grpc.ClientConn, error) {
		grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)),
		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unaryInterceptors...)),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxGRPCMessageSize), grpc.MaxCallSendMsgSize(MaxGRPCMessageSize)),
		grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
		grpc.WithUnaryInterceptor(grpc_util.OTELUnaryClientInterceptor()),
		grpc.WithStreamInterceptor(grpc_util.OTELStreamClientInterceptor()),
	}

	dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))

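A hedged aside on the hunk above: in grpc-go, repeated `grpc.WithUnaryInterceptor`/`WithStreamInterceptor` options override one another, which is why interceptors are usually combined through the chaining variants. A sketch of that pattern, not the package's actual wiring (the function and parameter names are placeholders):

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// newConnSketch combines all client interceptors in one chained option so no
// interceptor silently replaces another.
func newConnSketch(address string, unary []grpc.UnaryClientInterceptor, stream []grpc.StreamClientInterceptor) (*grpc.ClientConn, error) {
	opts := []grpc.DialOption{
		grpc.WithChainUnaryInterceptor(unary...),
		grpc.WithChainStreamInterceptor(stream...),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	}
	return grpc.Dial(address, opts...)
}
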
@@ -97,6 +97,14 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
		<-ctx.Done()
		// Kill by group ID to make sure child processes are killed. The - tells `kill` that it's a group ID.
		// Since we didn't set Pgid in SysProcAttr, the group ID is the same as the process ID. https://pkg.go.dev/syscall#SysProcAttr

		// Sending a TERM signal first to allow any potential cleanup if needed, and then sending a KILL signal
		_ = sysCallTerm(-cmd.Process.Pid)

		// modify cleanup timeout to allow process to cleanup
		cleanupTimeout := 5 * time.Second
		time.Sleep(cleanupTimeout)

		_ = sysCallKill(-cmd.Process.Pid)
	}()

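For reference, the pattern this hunk relies on: the command runs in its own process group, and signalling the negative PID addresses the whole group, SIGTERM first and SIGKILL only after a grace period. A Unix-only sketch of that shape (it waits on the process instead of sleeping a fixed interval; helper and command names are illustrative, not the repo's):

import (
	"context"
	"os/exec"
	"syscall"
	"time"
)

func terminateGroupSketch(ctx context.Context) error {
	cmd := exec.Command("sh", "-c", "trap 'exit 0' TERM; sleep 60")
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} // new group; group ID == child PID
	if err := cmd.Start(); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	<-ctx.Done()
	_ = syscall.Kill(-cmd.Process.Pid, syscall.SIGTERM) // negative pid signals the whole group
	select {
	case err := <-done: // the command honoured SIGTERM and cleaned up on its own
		return err
	case <-time.After(5 * time.Second):
		_ = syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)
		return <-done
	}
}
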
@@ -112,11 +120,16 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
		logCtx.Error(err.Error())
		return strings.TrimSuffix(output, "\n"), err
	}

	logCtx = logCtx.WithFields(log.Fields{
		"stderr":  stderr.String(),
		"command": command,
	})
	if len(output) == 0 {
		log.WithFields(log.Fields{
			"stderr":  stderr.String(),
			"command": command,
		}).Warn("Plugin command returned zero output")
		logCtx.Warn("Plugin command returned zero output")
	} else {
		// Log stderr even on successful commands to help develop plugins
		logCtx.Info("Plugin command successful")
	}

	return strings.TrimSuffix(output, "\n"), nil

@@ -369,6 +369,28 @@ func TestRunCommandEmptyCommand(t *testing.T) {
	assert.ErrorContains(t, err, "Command is empty")
}

// TestRunCommandContextTimeoutWithCleanup makes sure that the process is given enough time to clean up before SIGKILL is sent.
func TestRunCommandContextTimeoutWithCleanup(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond)
	defer cancel()

	// Use a subshell so there's a child command.
	// This command sleeps for 4 seconds, which is currently less than the 5 second delay between the SIGTERM and SIGKILL signals, and then exits successfully.
	command := Command{
		Command: []string{"sh", "-c"},
		Args:    []string{`(trap 'echo "cleanup completed"; exit' TERM; sleep 4)`},
	}

	before := time.Now()
	output, err := runCommand(ctx, command, "", []string{})
	after := time.Now()

	assert.Error(t, err) // The command should time out, causing an error.
	assert.Less(t, after.Sub(before), 1*time.Second)
	// The command should still have completed the cleanup after termination.
	assert.Contains(t, output, "cleanup completed")
}

func Test_getParametersAnnouncement_empty_command(t *testing.T) {
	staticYAML := `
- name: static-a

@@ -14,3 +14,7 @@ func newSysProcAttr(setpgid bool) *syscall.SysProcAttr {
func sysCallKill(pid int) error {
	return syscall.Kill(pid, syscall.SIGKILL)
}

func sysCallTerm(pid int) error {
	return syscall.Kill(pid, syscall.SIGTERM)
}

@@ -14,3 +14,7 @@ func newSysProcAttr(setpgid bool) *syscall.SysProcAttr {
func sysCallKill(pid int) error {
	return nil
}

func sysCallTerm(pid int) error {
	return nil
}

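The two hunks above are the same helpers split per platform: one implementation signals with syscall.Kill, the other is a no-op stub for platforms without POSIX process-group signals. One plausible layout for the stub variant uses a build constraint; the tag and package name below are assumptions, the repository may select the files differently (for example by filename suffix):

//go:build windows

package plugin // assumed package name

// Windows has no POSIX process-group signals, so termination helpers become
// best-effort no-ops and the caller relies on the process exiting on its own.
func sysCallTerm(pid int) error { return nil }
func sysCallKill(pid int) error { return nil }
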
@@ -12,6 +12,11 @@ import (
	"google.golang.org/grpc/status"
)

// Component names
const (
	ApplicationController = "argocd-application-controller"
)

// Default service addresses and URLs of Argo CD internal services
const (
	// DefaultRepoServerAddr is the gRPC address of the Argo CD repo server
@@ -34,6 +39,8 @@ const (
	// ArgoCDTLSCertsConfigMapName contains TLS certificate data for connecting repositories. Will get mounted as volume to pods
	ArgoCDTLSCertsConfigMapName = "argocd-tls-certs-cm"
	ArgoCDGPGKeysConfigMapName  = "argocd-gpg-keys-cm"
	// ArgoCDAppControllerShardConfigMapName contains the application controller to shard mapping
	ArgoCDAppControllerShardConfigMapName = "argocd-app-controller-shard-cm"
)

// Some default configurables
@@ -109,6 +116,8 @@ const (
	// RoundRobinShardingAlgorithm is a flag value that can be opted for the sharding algorithm; it uses an equal distribution across all shards
	RoundRobinShardingAlgorithm = "round-robin"
	DefaultShardingAlgorithm    = LegacyShardingAlgorithm
	// AppControllerHeartbeatUpdateRetryCount is the retry count for updating the Shard Mapping to the Shard Mapping ConfigMap used by Application Controller
	AppControllerHeartbeatUpdateRetryCount = 3
)

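A small illustration of how these sharding constants would typically be consumed: the algorithm name is read from ARGOCD_CONTROLLER_SHARDING_ALGORITHM and falls back to the legacy default. This is a sketch, not the controller's actual wiring; env.StringFromEnv is used here the same way the readiness check later in this diff uses it, and log is the logrus alias imported above.

// resolveShardingAlgorithmSketch picks the sharding algorithm, defaulting to the
// legacy behaviour when the env value is missing or unrecognised.
func resolveShardingAlgorithmSketch() string {
	algorithm := env.StringFromEnv(common.EnvControllerShardingAlgorithm, common.DefaultShardingAlgorithm)
	if algorithm != common.LegacyShardingAlgorithm && algorithm != common.RoundRobinShardingAlgorithm {
		log.Warnf("unknown sharding algorithm %q, using %q", algorithm, common.DefaultShardingAlgorithm)
		return common.DefaultShardingAlgorithm
	}
	return algorithm
}
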
// Dex related constants
@@ -138,6 +147,8 @@ const (
	// LabelKeyAppInstance is the label key to use to uniquely identify the instance of an application
	// The Argo CD application name is used as the instance name
	LabelKeyAppInstance = "app.kubernetes.io/instance"
	// LabelKeyAppName is the label key to use to uniquely identify the name of the Kubernetes application
	LabelKeyAppName = "app.kubernetes.io/name"
	// LabelKeyLegacyApplicationName is the legacy label (v0.10 and below) and is superseded by 'app.kubernetes.io/instance'
	LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name"
	// LabelKeySecretType contains the type of argocd secret (currently: 'cluster', 'repository', 'repo-config' or 'repo-creds')
@@ -207,10 +218,14 @@ const (
	EnvPauseGenerationRequests = "ARGOCD_PAUSE_GEN_REQUESTS"
	// EnvControllerReplicas is the number of controller replicas
	EnvControllerReplicas = "ARGOCD_CONTROLLER_REPLICAS"
	// EnvControllerHeartbeatTime will update the heartbeat for application controller to claim shard
	EnvControllerHeartbeatTime = "ARGOCD_CONTROLLER_HEARTBEAT_TIME"
	// EnvControllerShard is the shard number that should be handled by controller
	EnvControllerShard = "ARGOCD_CONTROLLER_SHARD"
	// EnvControllerShardingAlgorithm is the distribution sharding algorithm to be used: legacy or round-robin
	EnvControllerShardingAlgorithm = "ARGOCD_CONTROLLER_SHARDING_ALGORITHM"
	// EnvEnableDynamicClusterDistribution enables dynamic sharding (ALPHA)
	EnvEnableDynamicClusterDistribution = "ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION"
	// EnvEnableGRPCTimeHistogramEnv enables gRPC metrics collection
	EnvEnableGRPCTimeHistogramEnv = "ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM"
	// EnvGithubAppCredsExpirationDuration controls the caching of Github app credentials. This value is in minutes (default: 60)
@@ -233,6 +248,16 @@ const (
	EnvCMPWorkDir = "ARGOCD_CMP_WORKDIR"
	// EnvGPGDataPath overrides the location where GPG keyring for signature verification is stored
	EnvGPGDataPath = "ARGOCD_GPG_DATA_PATH"
	// EnvServerName is the name of the Argo CD server component, as specified by the value under the LabelKeyAppName label key.
	EnvServerName = "ARGOCD_SERVER_NAME"
	// EnvRepoServerName is the name of the Argo CD repo server component, as specified by the value under the LabelKeyAppName label key.
	EnvRepoServerName = "ARGOCD_REPO_SERVER_NAME"
	// EnvAppControllerName is the name of the Argo CD application controller component, as specified by the value under the LabelKeyAppName label key.
	EnvAppControllerName = "ARGOCD_APPLICATION_CONTROLLER_NAME"
	// EnvRedisName is the name of the Argo CD redis component, as specified by the value under the LabelKeyAppName label key.
	EnvRedisName = "ARGOCD_REDIS_NAME"
	// EnvRedisHaProxyName is the name of the Argo CD Redis HA proxy component, as specified by the value under the LabelKeyAppName label key.
	EnvRedisHaProxyName = "ARGOCD_REDIS_HAPROXY_NAME"
)

// Config Management Plugin related constants
@@ -268,6 +293,16 @@ const (
	DefaultGitRetryFactor = int64(2)
)

// Constants represent the pod selector labels of the Argo CD component names. These values are determined by the
// installation manifests.
const (
	DefaultServerName                = "argocd-server"
	DefaultRepoServerName            = "argocd-repo-server"
	DefaultApplicationControllerName = "argocd-application-controller"
	DefaultRedisName                 = "argocd-redis"
	DefaultRedisHaProxyName          = "argocd-redis-ha-haproxy"
)

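These Env*Name / Default*Name pairs exist so that renamed components can still be located by label. A minimal sketch of the lookup, mirroring how the controller readiness check further down resolves its own Deployment name; kubeClient, namespace and ctx are assumed to be in scope, and the selector construction is illustrative:

// Resolve the server component's name from the environment, falling back to the
// manifest default, then list its pods by the app.kubernetes.io/name label.
serverName := env.StringFromEnv(common.EnvServerName, common.DefaultServerName)
selector := fmt.Sprintf("%s=%s", common.LabelKeyAppName, serverName)
pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
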
// GetGnuPGHomePath retrieves the path to use for GnuPG home directory, which is either taken from GNUPGHOME environment or a default value
func GetGnuPGHomePath() string {
	if gnuPgHome := os.Getenv(EnvGnuPGHome); gnuPgHome == "" {

@@ -3,6 +3,7 @@ package controller
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
goerrors "errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
@@ -34,6 +35,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/informers"
|
||||
informerv1 "k8s.io/client-go/informers/apps/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
@@ -51,6 +54,7 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
|
||||
"github.com/argoproj/argo-cd/v2/util/env"
|
||||
|
||||
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
|
||||
"github.com/argoproj/argo-cd/v2/util/db"
|
||||
@@ -59,10 +63,12 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/helm"
|
||||
logutils "github.com/argoproj/argo-cd/v2/util/log"
|
||||
settings_util "github.com/argoproj/argo-cd/v2/util/settings"
|
||||
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
updateOperationStateTimeout = 1 * time.Second
|
||||
updateOperationStateTimeout = 1 * time.Second
|
||||
defaultDeploymentInformerResyncDuration = 10 * time.Second
|
||||
// orphanedIndex contains application which monitor orphaned resources by namespace
|
||||
orphanedIndex = "orphaned"
|
||||
)
|
||||
@@ -105,6 +111,7 @@ type ApplicationController struct {
|
||||
appInformer cache.SharedIndexInformer
|
||||
appLister applisters.ApplicationLister
|
||||
projInformer cache.SharedIndexInformer
|
||||
deploymentInformer informerv1.DeploymentInformer
|
||||
appStateManager AppStateManager
|
||||
stateCache statecache.LiveStateCache
|
||||
statusRefreshTimeout time.Duration
|
||||
@@ -160,7 +167,7 @@ func NewApplicationController(
|
||||
statusHardRefreshTimeout: appHardResyncPeriod,
|
||||
refreshRequestedApps: make(map[string]CompareWith),
|
||||
refreshRequestedAppsMutex: &sync.Mutex{},
|
||||
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, "argocd-application-controller"),
|
||||
auditLogger: argo.NewAuditLogger(namespace, kubeClientset, common.ApplicationController),
|
||||
settingsMgr: settingsMgr,
|
||||
selfHealTimeout: selfHealTimeout,
|
||||
clusterFilter: clusterFilter,
|
||||
@@ -201,11 +208,35 @@ func NewApplicationController(
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
|
||||
deploymentInformer := factory.Apps().V1().Deployments()
|
||||
|
||||
readinessHealthCheck := func(r *http.Request) error {
|
||||
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
|
||||
appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
|
||||
if err != nil {
|
||||
if kubeerrors.IsNotFound(err) {
|
||||
appControllerDeployment = nil
|
||||
} else {
|
||||
return fmt.Errorf("error retrieving Application Controller Deployment: %s", err)
|
||||
}
|
||||
}
|
||||
if appControllerDeployment != nil {
|
||||
if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
|
||||
return fmt.Errorf("application controller deployment replicas is not set or is less than 0, replicas: %d", appControllerDeployment.Spec.Replicas)
|
||||
}
|
||||
shard := env.ParseNumFromEnv(common.EnvControllerShard, -1, -math.MaxInt32, math.MaxInt32)
|
||||
if _, err := sharding.GetOrUpdateShardFromConfigMap(kubeClientset.(*kubernetes.Clientset), settingsMgr, int(*appControllerDeployment.Spec.Replicas), shard); err != nil {
|
||||
return fmt.Errorf("error while updating the heartbeat for to the Shard Mapping ConfigMap: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
|
||||
var err error
|
||||
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, func(r *http.Request) error {
|
||||
return nil
|
||||
}, metricsApplicationLabels)
|
||||
ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
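The new readinessHealthCheck above is handed to metrics.NewMetricsServer in place of the old always-healthy closure. For orientation only, this is roughly how such a check is typically surfaced as an HTTP readiness endpoint; the real wiring lives inside the metrics server, so treat this as a sketch:

// Sketch: expose a readiness function like readinessHealthCheck on /healthz.
mux := http.NewServeMux()
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
	if err := readinessHealthCheck(r); err != nil {
		http.Error(w, err.Error(), http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusOK)
})
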
@@ -220,6 +251,7 @@ func NewApplicationController(
|
||||
ctrl.appInformer = appInformer
|
||||
ctrl.appLister = appLister
|
||||
ctrl.projInformer = projInformer
|
||||
ctrl.deploymentInformer = deploymentInformer
|
||||
ctrl.appStateManager = appStateManager
|
||||
ctrl.stateCache = stateCache
|
||||
|
||||
@@ -364,12 +396,14 @@ func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]b
|
||||
namespace = "(cluster-scoped)"
|
||||
}
|
||||
log.WithFields(log.Fields{
|
||||
"application": appKey,
|
||||
"level": level,
|
||||
"namespace": namespace,
|
||||
"name": ref.Name,
|
||||
"api-version": ref.APIVersion,
|
||||
"kind": ref.Kind,
|
||||
"application": appKey,
|
||||
"level": level,
|
||||
"namespace": namespace,
|
||||
"name": ref.Name,
|
||||
"api-version": ref.APIVersion,
|
||||
"kind": ref.Kind,
|
||||
"server": app.Spec.Destination.Server,
|
||||
"cluster-name": app.Spec.Destination.Name,
|
||||
}).Debug("Requesting app refresh caused by object update")
|
||||
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), &level, nil)
|
||||
@@ -722,6 +756,7 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
|
||||
|
||||
go ctrl.appInformer.Run(ctx.Done())
|
||||
go ctrl.projInformer.Run(ctx.Done())
|
||||
go ctrl.deploymentInformer.Informer().Run(ctx.Done())
|
||||
|
||||
errors.CheckError(ctrl.stateCache.Init())
|
||||
|
||||
@@ -1246,40 +1281,44 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) setOperationState(app *appv1.Application, state *appv1.OperationState) {
|
||||
kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error {
|
||||
if state.Phase == "" {
|
||||
// expose any bugs where we neglect to set phase
|
||||
panic("no phase was set")
|
||||
}
|
||||
if state.Phase.Completed() {
|
||||
now := metav1.Now()
|
||||
state.FinishedAt = &now
|
||||
}
|
||||
patch := map[string]interface{}{
|
||||
"status": map[string]interface{}{
|
||||
"operationState": state,
|
||||
},
|
||||
}
|
||||
if state.Phase.Completed() {
|
||||
// If operation is completed, clear the operation field to indicate no operation is
|
||||
// in progress.
|
||||
patch["operation"] = nil
|
||||
}
|
||||
if reflect.DeepEqual(app.Status.OperationState, state) {
|
||||
log.Infof("No operation updates necessary to '%s'. Skipping patch", app.QualifiedName())
|
||||
return nil
|
||||
}
|
||||
patchJSON, err := json.Marshal(patch)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshaling json: %w", err)
|
||||
}
|
||||
if app.Status.OperationState != nil && app.Status.OperationState.FinishedAt != nil && state.FinishedAt == nil {
|
||||
patchJSON, err = jsonpatch.MergeMergePatches(patchJSON, []byte(`{"status": {"operationState": {"finishedAt": null}}}`))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error merging operation state patch: %w", err)
|
||||
}
|
||||
}
|
||||
logCtx := log.WithFields(log.Fields{"application": app.Name, "appNamespace": app.Namespace, "project": app.Spec.Project})
|
||||
|
||||
if state.Phase == "" {
|
||||
// expose any bugs where we neglect to set phase
|
||||
panic("no phase was set")
|
||||
}
|
||||
if state.Phase.Completed() {
|
||||
now := metav1.Now()
|
||||
state.FinishedAt = &now
|
||||
}
|
||||
patch := map[string]interface{}{
|
||||
"status": map[string]interface{}{
|
||||
"operationState": state,
|
||||
},
|
||||
}
|
||||
if state.Phase.Completed() {
|
||||
// If operation is completed, clear the operation field to indicate no operation is
|
||||
// in progress.
|
||||
patch["operation"] = nil
|
||||
}
|
||||
if reflect.DeepEqual(app.Status.OperationState, state) {
|
||||
logCtx.Infof("No operation updates necessary to '%s'. Skipping patch", app.QualifiedName())
|
||||
return
|
||||
}
|
||||
patchJSON, err := json.Marshal(patch)
|
||||
if err != nil {
|
||||
logCtx.Errorf("error marshaling json: %v", err)
|
||||
return
|
||||
}
|
||||
if app.Status.OperationState != nil && app.Status.OperationState.FinishedAt != nil && state.FinishedAt == nil {
|
||||
patchJSON, err = jsonpatch.MergeMergePatches(patchJSON, []byte(`{"status": {"operationState": {"finishedAt": null}}}`))
|
||||
if err != nil {
|
||||
logCtx.Errorf("error merging operation state patch: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error {
|
||||
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
_, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
@@ -1287,35 +1326,41 @@ func (ctrl *ApplicationController) setOperationState(app *appv1.Application, sta
|
||||
if apierr.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
// kube.RetryUntilSucceed logs failed attempts at "debug" level, but we want to know if this fails. Log a
|
||||
// warning.
|
||||
logCtx.Warnf("error patching application with operation state: %v", err)
|
||||
return fmt.Errorf("error patching application with operation state: %w", err)
|
||||
}
|
||||
log.Infof("updated '%s' operation (phase: %s)", app.QualifiedName(), state.Phase)
|
||||
if state.Phase.Completed() {
|
||||
eventInfo := argo.EventInfo{Reason: argo.EventReasonOperationCompleted}
|
||||
var messages []string
|
||||
if state.Operation.Sync != nil && len(state.Operation.Sync.Resources) > 0 {
|
||||
messages = []string{"Partial sync operation"}
|
||||
} else {
|
||||
messages = []string{"Sync operation"}
|
||||
}
|
||||
if state.SyncResult != nil {
|
||||
messages = append(messages, "to", state.SyncResult.Revision)
|
||||
}
|
||||
if state.Phase.Successful() {
|
||||
eventInfo.Type = v1.EventTypeNormal
|
||||
messages = append(messages, "succeeded")
|
||||
} else {
|
||||
eventInfo.Type = v1.EventTypeWarning
|
||||
messages = append(messages, "failed:", state.Message)
|
||||
}
|
||||
ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " "), "")
|
||||
ctrl.metricsServer.IncSync(app, state)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
logCtx.Infof("updated '%s' operation (phase: %s)", app.QualifiedName(), state.Phase)
|
||||
if state.Phase.Completed() {
|
||||
eventInfo := argo.EventInfo{Reason: argo.EventReasonOperationCompleted}
|
||||
var messages []string
|
||||
if state.Operation.Sync != nil && len(state.Operation.Sync.Resources) > 0 {
|
||||
messages = []string{"Partial sync operation"}
|
||||
} else {
|
||||
messages = []string{"Sync operation"}
|
||||
}
|
||||
if state.SyncResult != nil {
|
||||
messages = append(messages, "to", state.SyncResult.Revision)
|
||||
}
|
||||
if state.Phase.Successful() {
|
||||
eventInfo.Type = v1.EventTypeNormal
|
||||
messages = append(messages, "succeeded")
|
||||
} else {
|
||||
eventInfo.Type = v1.EventTypeWarning
|
||||
messages = append(messages, "failed:", state.Message)
|
||||
}
|
||||
ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " "), "")
|
||||
ctrl.metricsServer.IncSync(app, state)
|
||||
}
|
||||
}
|
||||
|
||||
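The setOperationState rewrite above pulls validation and patch construction out of the retried closure, so a transient API failure repeats only the Patch call and is now logged at warning level instead of being hidden at debug level. Distilled to its shape, using the same helper the function itself uses; patch, app, appClient and logCtx are the surrounding function's values:

// Compute the payload once, then retry only the network call.
patchJSON, err := json.Marshal(patch)
if err != nil {
	logCtx.Errorf("error marshaling json: %v", err)
	return
}
kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state",
	logutils.NewLogrusLogger(logutils.NewWithCurrentConfig()), func() error {
		_, err := appClient.Patch(context.Background(), app.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{})
		if err != nil && apierr.IsNotFound(err) {
			return nil // the app was deleted; nothing left to update
		}
		return err
	})
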
func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {
|
||||
patchMs := time.Duration(0) // time spent in doing patch/update calls
|
||||
setOpMs := time.Duration(0) // time spent in doing Operation patch calls in autosync
|
||||
appKey, shutdown := ctrl.appRefreshQueue.Get()
|
||||
if shutdown {
|
||||
processNext = false
|
||||
@@ -1357,6 +1402,8 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
ctrl.metricsServer.IncReconcile(origApp, reconcileDuration)
|
||||
logCtx.WithFields(log.Fields{
|
||||
"time_ms": reconcileDuration.Milliseconds(),
|
||||
"patch_ms": patchMs.Milliseconds(),
|
||||
"setop_ms": setOpMs.Milliseconds(),
|
||||
"level": comparisonLevel,
|
||||
"dest-server": origApp.Spec.Destination.Server,
|
||||
"dest-name": origApp.Spec.Destination.Name,
|
||||
@@ -1378,7 +1425,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
}
|
||||
}
|
||||
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
patchMs = ctrl.persistAppStatus(origApp, &app.Status)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1387,7 +1434,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
if hasErrors {
|
||||
app.Status.Sync.Status = appv1.SyncStatusCodeUnknown
|
||||
app.Status.Health.Status = health.HealthStatusUnknown
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
patchMs = ctrl.persistAppStatus(origApp, &app.Status)
|
||||
|
||||
if err := ctrl.cache.SetAppResourcesTree(app.InstanceName(ctrl.namespace), &appv1.ApplicationTree{}); err != nil {
|
||||
log.Warnf("failed to set app resource tree: %v", err)
|
||||
@@ -1449,7 +1496,8 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
}
|
||||
|
||||
if project.Spec.SyncWindows.Matches(app).CanSync(false) {
|
||||
syncErrCond := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources)
|
||||
syncErrCond, opMS := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources)
|
||||
setOpMs = opMS
|
||||
if syncErrCond != nil {
|
||||
app.Status.SetConditions(
|
||||
[]appv1.ApplicationCondition{*syncErrCond},
|
||||
@@ -1477,7 +1525,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
app.Status.SourceType = compareResult.appSourceType
|
||||
app.Status.SourceTypes = compareResult.appSourceTypes
|
||||
app.Status.ControllerNamespace = ctrl.namespace
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
patchMs = ctrl.persistAppStatus(origApp, &app.Status)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1533,6 +1581,8 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
|
||||
reason = "spec.destination differs"
|
||||
} else if app.HasChangedManagedNamespaceMetadata() {
|
||||
reason = "spec.syncPolicy.managedNamespaceMetadata differs"
|
||||
} else if !app.Spec.IgnoreDifferences.Equals(app.Status.Sync.ComparedTo.IgnoreDifferences) {
|
||||
reason = "spec.ignoreDifferences differs"
|
||||
} else if requested, level := ctrl.isRefreshRequested(app.QualifiedName()); requested {
|
||||
compareWith = level
|
||||
reason = "controller refresh requested"
|
||||
@@ -1590,7 +1640,7 @@ func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Applica
|
||||
}
|
||||
|
||||
// persistAppStatus persists updates to application status. If no changes were made, it is a no-op
|
||||
func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) {
|
||||
func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) (patchMs time.Duration) {
|
||||
logCtx := log.WithFields(log.Fields{"application": orig.QualifiedName()})
|
||||
if orig.Status.Sync.Status != newStatus.Sync.Status {
|
||||
message := fmt.Sprintf("Updated sync status: %s -> %s", orig.Status.Sync.Status, newStatus.Sync.Status)
|
||||
@@ -1619,6 +1669,11 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
logCtx.Infof("No status changes. Skipping patch")
|
||||
return
|
||||
}
|
||||
// calculate time for patch call
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
patchMs = time.Since(start)
|
||||
}()
|
||||
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace)
|
||||
_, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
@@ -1626,29 +1681,30 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
} else {
|
||||
logCtx.Infof("Update successful")
|
||||
}
|
||||
return patchMs
|
||||
}
|
||||
|
||||
// autoSync will initiate a sync operation for an application configured with automated sync
|
||||
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) *appv1.ApplicationCondition {
|
||||
func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) (*appv1.ApplicationCondition, time.Duration) {
|
||||
if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil {
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
|
||||
if app.Operation != nil {
|
||||
logCtx.Infof("Skipping auto-sync: another operation is in progress")
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
if app.DeletionTimestamp != nil && !app.DeletionTimestamp.IsZero() {
|
||||
logCtx.Infof("Skipping auto-sync: deletion in progress")
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
// Only perform auto-sync if we detect OutOfSync status. This is to prevent us from attempting
|
||||
// a sync when application is already in a Synced or Unknown state
|
||||
if syncStatus.Status != appv1.SyncStatusCodeOutOfSync {
|
||||
logCtx.Infof("Skipping auto-sync: application status is %s", syncStatus.Status)
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
if !app.Spec.SyncPolicy.Automated.Prune {
|
||||
@@ -1661,7 +1717,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
}
|
||||
if requirePruneOnly {
|
||||
logCtx.Infof("Skipping auto-sync: need to prune extra resources only but automated prune is disabled")
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1690,10 +1746,10 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
if !attemptPhase.Successful() {
|
||||
logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA)
|
||||
message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
|
||||
}
|
||||
logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
|
||||
return nil
|
||||
return nil, 0
|
||||
} else if alreadyAttempted && selfHeal {
|
||||
if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
|
||||
for _, resource := range resources {
|
||||
@@ -1708,7 +1764,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
} else {
|
||||
logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
|
||||
ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
|
||||
return nil
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1723,19 +1779,29 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
if bAllNeedPrune {
|
||||
message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA)
|
||||
logCtx.Warnf(message)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}, 0
|
||||
}
|
||||
}
|
||||
|
||||
appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
start := time.Now()
|
||||
_, err := argo.SetAppOperation(appIf, app.Name, &op)
|
||||
setOpTime := time.Since(start)
|
||||
if err != nil {
|
||||
if goerrors.Is(err, argo.ErrAnotherOperationInProgress) {
|
||||
// skipping auto-sync because another operation is in progress and was not noticed due to stale data in informer
|
||||
// it is safe to skip auto-sync because it is already running
|
||||
logCtx.Warnf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}, setOpTime
|
||||
}
|
||||
message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA)
|
||||
ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message, "")
|
||||
logCtx.Info(message)
|
||||
return nil
|
||||
return nil, setOpTime
|
||||
}
|
||||
|
||||
// alreadyAttemptedSync returns whether the most recent sync was performed against the
|
||||
@@ -1791,6 +1857,12 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool,
|
||||
return retryAfter <= 0, retryAfter
|
||||
}
|
||||
|
||||
// isAppNamespaceAllowed returns whether the application is allowed in the
|
||||
// namespace it's residing in.
|
||||
func (ctrl *ApplicationController) isAppNamespaceAllowed(app *appv1.Application) bool {
|
||||
return app.Namespace == ctrl.namespace || glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false)
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
|
||||
app, ok := obj.(*appv1.Application)
|
||||
if !ok {
|
||||
@@ -1799,7 +1871,7 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
|
||||
|
||||
// Only process given app if it exists in a watched namespace, or in the
|
||||
// control plane's namespace.
|
||||
if app.Namespace != ctrl.namespace && !glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) {
|
||||
if !ctrl.isAppNamespaceAllowed(app) {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1850,7 +1922,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
}
|
||||
newItems := []appv1.Application{}
|
||||
for _, app := range appList.Items {
|
||||
if ctrl.namespace == app.Namespace || glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) {
|
||||
if ctrl.isAppNamespaceAllowed(&app) {
|
||||
newItems = append(newItems, app)
|
||||
}
|
||||
}
|
||||
@@ -1867,20 +1939,24 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
cache.NamespaceIndex: func(obj interface{}) ([]string, error) {
|
||||
app, ok := obj.(*appv1.Application)
|
||||
if ok {
|
||||
// This call to 'ValidateDestination' ensures that the .spec.destination field of all Applications
|
||||
// returned by the informer/lister will have server field set (if not already set) based on the name.
|
||||
// (or, if not found, an error app condition)
|
||||
// We only generally work with applications that are in one of
// the allowed namespaces.
|
||||
if ctrl.isAppNamespaceAllowed(app) {
|
||||
// If the application is not allowed to use the project,
|
||||
// log an error.
|
||||
if _, err := ctrl.getAppProj(app); err != nil {
|
||||
ctrl.setAppCondition(app, ctrl.projectErrorToCondition(err, app))
|
||||
} else {
|
||||
// This call to 'ValidateDestination' ensures that the .spec.destination field of all Applications
|
||||
// returned by the informer/lister will have server field set (if not already set) based on the name.
|
||||
// (or, if not found, an error app condition)
|
||||
|
||||
// If the server field is not set, set it based on the cluster name; if the cluster name can't be found,
|
||||
// log an error as an App Condition.
|
||||
if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil {
|
||||
ctrl.setAppCondition(app, appv1.ApplicationCondition{Type: appv1.ApplicationConditionInvalidSpecError, Message: err.Error()})
|
||||
}
|
||||
|
||||
// If the application is not allowed to use the project,
|
||||
// log an error.
|
||||
if _, err := ctrl.getAppProj(app); err != nil {
|
||||
ctrl.setAppCondition(app, ctrl.projectErrorToCondition(err, app))
|
||||
// If the server field is not set, set it based on the cluster name; if the cluster name can't be found,
|
||||
// log an error as an App Condition.
|
||||
if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil {
|
||||
ctrl.setAppCondition(app, appv1.ApplicationCondition{Type: appv1.ApplicationConditionInvalidSpecError, Message: err.Error()})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1892,7 +1968,11 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
proj, err := applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()).AppProjects(ctrl.namespace).Get(app.Spec.GetProject())
|
||||
if !ctrl.isAppNamespaceAllowed(app) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
proj, err := ctrl.getAppProj(app)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -3,9 +3,11 @@ package controller
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
clustercache "github.com/argoproj/gitops-engine/pkg/cache"
|
||||
@@ -366,7 +368,7 @@ func TestAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -383,7 +385,7 @@ func TestAutoSyncNotAllowEmpty(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.NotNil(t, cond)
|
||||
}
|
||||
|
||||
@@ -396,7 +398,7 @@ func TestAutoSyncAllowEmpty(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
}
|
||||
|
||||
@@ -410,7 +412,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -425,7 +427,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -441,7 +443,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -458,7 +460,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -484,7 +486,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -498,7 +500,7 @@ func TestSkipAutoSync(t *testing.T) {
|
||||
Status: v1alpha1.SyncStatusCodeOutOfSync,
|
||||
Revision: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{
|
||||
{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync, RequiresPruning: true},
|
||||
})
|
||||
assert.Nil(t, cond)
|
||||
@@ -536,7 +538,7 @@ func TestAutoSyncIndicateError(t *testing.T) {
|
||||
Source: *app.Spec.Source.DeepCopy(),
|
||||
},
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
assert.NotNil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -579,7 +581,7 @@ func TestAutoSyncParameterOverrides(t *testing.T) {
|
||||
Revision: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
},
|
||||
}
|
||||
cond := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
cond, _ := ctrl.autoSync(app, &syncStatus, []v1alpha1.ResourceStatus{{Name: "guestbook", Kind: kube.DeploymentKind, Status: v1alpha1.SyncStatusCodeOutOfSync}})
|
||||
assert.Nil(t, cond)
|
||||
app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(test.FakeArgoCDNamespace).Get(context.Background(), "my-app", metav1.GetOptions{})
|
||||
assert.NoError(t, err)
|
||||
@@ -926,6 +928,41 @@ func TestSetOperationStateOnDeletedApp(t *testing.T) {
|
||||
assert.True(t, patched)
|
||||
}
|
||||
|
||||
type logHook struct {
|
||||
entries []logrus.Entry
|
||||
}
|
||||
|
||||
func (h *logHook) Levels() []logrus.Level {
|
||||
return []logrus.Level{logrus.WarnLevel}
|
||||
}
|
||||
|
||||
func (h *logHook) Fire(entry *logrus.Entry) error {
|
||||
h.entries = append(h.entries, *entry)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestSetOperationStateLogRetries(t *testing.T) {
|
||||
hook := logHook{}
|
||||
logrus.AddHook(&hook)
|
||||
t.Cleanup(func() {
|
||||
logrus.StandardLogger().ReplaceHooks(logrus.LevelHooks{})
|
||||
})
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
|
||||
fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
|
||||
fakeAppCs.ReactionChain = nil
|
||||
patched := false
|
||||
fakeAppCs.AddReactor("patch", "*", func(action kubetesting.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if !patched {
|
||||
patched = true
|
||||
return true, nil, errors.New("fake error")
|
||||
}
|
||||
return true, nil, nil
|
||||
})
|
||||
ctrl.setOperationState(newFakeApp(), &v1alpha1.OperationState{Phase: synccommon.OperationSucceeded})
|
||||
assert.True(t, patched)
|
||||
assert.Contains(t, hook.entries[0].Message, "fake error")
|
||||
}
|
||||
|
||||
func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
@@ -943,7 +980,6 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
|
||||
app := tc.app
|
||||
now := metav1.Now()
|
||||
app.Status.ReconciledAt = &now
|
||||
@@ -951,7 +987,8 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
app.Status.Sync = v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
ComparedTo: v1alpha1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
Destination: app.Spec.Destination,
|
||||
IgnoreDifferences: app.Spec.IgnoreDifferences,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -961,44 +998,67 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
app.Status.Sync.ComparedTo.Source = app.Spec.GetSource()
|
||||
}
|
||||
|
||||
// no need to refresh just reconciled application
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
|
||||
|
||||
// refresh app using the 'deepest' requested comparison level
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
|
||||
t.Run("no need to refresh just reconciled application", func(t *testing.T) {
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
})
|
||||
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithRecent, compareWith)
|
||||
t.Run("requested refresh is respected", func(t *testing.T) {
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
|
||||
// refresh application which status is not reconciled using latest commit
|
||||
app.Status.Sync = v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeUnknown}
|
||||
// use a one-off controller so other tests don't have a manual refresh request
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
|
||||
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
|
||||
t.Run("refresh app using the 'latest' level if comparison expired", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
// refresh app using the 'deepest' requested comparison level
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Minute, 2*time.Hour)
|
||||
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
|
||||
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithRecent, compareWith)
|
||||
})
|
||||
|
||||
t.Run("refresh application which status is not reconciled using latest commit", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
app.Status.Sync = v1alpha1.SyncStatus{Status: v1alpha1.SyncStatusCodeUnknown}
|
||||
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
})
|
||||
|
||||
t.Run("refresh app using the 'latest' level if comparison expired", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
|
||||
// use a one-off controller so other tests don't have a manual refresh request
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
|
||||
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Minute, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatest, compareWith)
|
||||
})
|
||||
|
||||
t.Run("refresh app using the 'latest' level if comparison expired for hard refresh", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.Sync = v1alpha1.SyncStatus{
|
||||
Status: v1alpha1.SyncStatusCodeSynced,
|
||||
ComparedTo: v1alpha1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
Destination: app.Spec.Destination,
|
||||
IgnoreDifferences: app.Spec.IgnoreDifferences,
|
||||
},
|
||||
}
|
||||
if app.Spec.HasMultipleSources() {
|
||||
@@ -1006,10 +1066,16 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
} else {
|
||||
app.Status.Sync.ComparedTo.Source = app.Spec.GetSource()
|
||||
}
|
||||
|
||||
// use a one-off controller so other tests don't have a manual refresh request
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
|
||||
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 2*time.Hour, 1*time.Minute)
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 2*time.Hour, 1*time.Minute)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeHard, refreshType)
|
||||
assert.Equal(t, CompareWithLatest, compareWith)
|
||||
@@ -1017,12 +1083,14 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
|
||||
t.Run("execute hard refresh if app has refresh annotation", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
app.Annotations = map[string]string{
|
||||
v1alpha1.AnnotationKeyRefresh: string(v1alpha1.RefreshTypeHard),
|
||||
}
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeHard, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
@@ -1030,7 +1098,8 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
|
||||
t.Run("ensure that CompareWithLatest level is used if application source has changed", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
// sample app source change
|
||||
if app.Spec.HasMultipleSources() {
|
||||
app.Spec.Sources[0].Helm = &v1alpha1.ApplicationSourceHelm{
|
||||
@@ -1048,11 +1117,32 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
})
|
||||
|
||||
t.Run("ensure that CompareWithLatest level is used if ignored differences change", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
|
||||
app.Spec.IgnoreDifferences = []v1alpha1.ResourceIgnoreDifferences{
|
||||
{
|
||||
Group: "apps",
|
||||
Kind: "Deployment",
|
||||
JSONPointers: []string{
|
||||
"/spec/template/spec/containers/0/image",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, v1alpha1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatest, compareWith)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1163,7 +1253,7 @@ func TestUpdateReconciledAt(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
reconciledAt := metav1.NewTime(time.Now().Add(-1 * time.Second))
|
||||
app.Status = v1alpha1.ApplicationStatus{ReconciledAt: &reconciledAt}
|
||||
app.Status.Sync = v1alpha1.SyncStatus{ComparedTo: v1alpha1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination}}
|
||||
app.Status.Sync = v1alpha1.SyncStatus{ComparedTo: v1alpha1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination, IgnoreDifferences: app.Spec.IgnoreDifferences}}
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
|
||||
controller/cache/cache.go
@@ -52,6 +52,9 @@ const (
// EnvClusterCacheListPageSize is the env variable to control size of the list page size when making K8s queries
EnvClusterCacheListPageSize = "ARGOCD_CLUSTER_CACHE_LIST_PAGE_SIZE"

+ // EnvClusterCacheListPageBufferSize is the env variable to control the number of pages to buffer when making a K8s query to list resources
+ EnvClusterCacheListPageBufferSize = "ARGOCD_CLUSTER_CACHE_LIST_PAGE_BUFFER_SIZE"

// EnvClusterCacheListSemaphore is the env variable to control size of the list semaphore
// This is used to limit the number of concurrent memory consuming operations on the
// k8s list queries results across all clusters to avoid memory spikes during cache initialization.
@@ -84,6 +87,9 @@ var (
// 500 is equal to kubectl's size
clusterCacheListPageSize int64 = 500

+ // clusterCacheListPageBufferSize is the number of pages to buffer when performing K8s list requests
+ clusterCacheListPageBufferSize int32 = 1

// clusterCacheRetryLimit sets a retry limit for failed requests during cluster cache sync
// If set to 1, retries are disabled.
clusterCacheAttemptLimit int32 = 1
@@ -97,8 +103,9 @@ func init() {
clusterCacheWatchResyncDuration = env.ParseDurationFromEnv(EnvClusterCacheWatchResyncDuration, clusterCacheWatchResyncDuration, 0, math.MaxInt64)
clusterSyncRetryTimeoutDuration = env.ParseDurationFromEnv(EnvClusterSyncRetryTimeoutDuration, clusterSyncRetryTimeoutDuration, 0, math.MaxInt64)
clusterCacheListPageSize = env.ParseInt64FromEnv(EnvClusterCacheListPageSize, clusterCacheListPageSize, 0, math.MaxInt64)
+ clusterCacheListPageBufferSize = int32(env.ParseNumFromEnv(EnvClusterCacheListPageBufferSize, int(clusterCacheListPageBufferSize), 1, math.MaxInt32))
clusterCacheListSemaphoreSize = env.ParseInt64FromEnv(EnvClusterCacheListSemaphore, clusterCacheListSemaphoreSize, 0, math.MaxInt64)
- clusterCacheAttemptLimit = int32(env.ParseInt64FromEnv(EnvClusterCacheAttemptLimit, 1, 1, math.MaxInt32))
+ clusterCacheAttemptLimit = int32(env.ParseNumFromEnv(EnvClusterCacheAttemptLimit, int(clusterCacheAttemptLimit), 1, math.MaxInt32))
clusterCacheRetryUseBackoff = env.ParseBoolFromEnv(EnvClusterCacheRetryUseBackoff, false)
}
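The pattern used throughout this init() is "parse an integer from an env var, constrain it to a range, fall back to the default". A stand-alone sketch of that behaviour follows; the real code uses the repository's env helpers (env.ParseNumFromEnv and friends), so the function below is illustrative only.

package example

import (
	"math"
	"os"
	"strconv"
)

// parseNumFromEnvSketch mimics the clamped env parsing used above: missing,
// malformed, or out-of-range values fall back to the supplied default.
func parseNumFromEnvSketch(key string, def, min, max int) int {
	raw := os.Getenv(key)
	if raw == "" {
		return def
	}
	v, err := strconv.Atoi(raw)
	if err != nil || v < min || v > max {
		return def
	}
	return v
}

// Example: with ARGOCD_CLUSTER_CACHE_LIST_PAGE_BUFFER_SIZE=3 this yields 3;
// an unset or invalid value keeps the default buffer size of 1.
var listPageBufferSize = int32(parseNumFromEnvSketch("ARGOCD_CLUSTER_CACHE_LIST_PAGE_BUFFER_SIZE", 1, 1, math.MaxInt32))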
@@ -433,6 +440,11 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
return nil, fmt.Errorf("error getting custom label: %w", err)
}

+ respectRBAC, err := c.settingsMgr.RespectRBAC()
+ if err != nil {
+ return nil, fmt.Errorf("error getting value for %v: %w", settings.RespectRBAC, err)
+ }

clusterCacheConfig := cluster.RESTConfig()
// Controller dynamically fetches all resource types available on the cluster
// using a discovery API that may contain deprecated APIs.
@@ -450,6 +462,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
clusterCacheOpts := []clustercache.UpdateSettingsFunc{
clustercache.SetListSemaphore(semaphore.NewWeighted(clusterCacheListSemaphoreSize)),
clustercache.SetListPageSize(clusterCacheListPageSize),
+ clustercache.SetListPageBufferSize(clusterCacheListPageBufferSize),
clustercache.SetWatchResyncTimeout(clusterCacheWatchResyncDuration),
clustercache.SetClusterSyncRetryTimeout(clusterSyncRetryTimeoutDuration),
clustercache.SetResyncTimeout(clusterCacheResyncDuration),
@@ -487,6 +500,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
}),
clustercache.SetLogr(logutils.NewLogrusLogger(log.WithField("server", cluster.Server))),
clustercache.SetRetryOptions(clusterCacheAttemptLimit, clusterCacheRetryUseBackoff, isRetryableError),
+ clustercache.SetRespectRBAC(respectRBAC),
}

clusterCache = clustercache.NewClusterCache(clusterCacheConfig, clusterCacheOpts...)
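SetRetryOptions wires the attempt limit and backoff flag parsed earlier to a predicate that decides which errors are worth retrying. The sketch below is only a guess at the shape of such a predicate; the file's actual isRetryableError may cover a broader set of transient failures.

package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// isRetryableErrorSketch is an illustrative retry predicate: report true only
// for API errors that are plausibly transient, so the cluster cache sync is
// retried instead of failing outright.
func isRetryableErrorSketch(err error) bool {
	if err == nil {
		return false
	}
	return apierrors.IsServerTimeout(err) ||
		apierrors.IsServiceUnavailable(err) ||
		apierrors.IsTooManyRequests(err) ||
		apierrors.IsTimeout(err)
}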
@@ -513,7 +527,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
namespace = "(cluster-scoped)"
}
log.WithFields(log.Fields{
- "server": clusterCache.GetClusterInfo().Server,
+ "server": cluster.Server,
"namespace": namespace,
"name": ref.Name,
"api-version": ref.APIVersion,
@@ -620,7 +634,7 @@ func (c *liveStateCache) GetNamespaceTopLevelResources(server string, namespace
func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
clusterInfo, err := c.getSyncedCluster(a.Spec.Destination.Server)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to get cluster info for %q: %w", a.Spec.Destination.Server, err)
}
return clusterInfo.GetManagedLiveObjs(targetObjs, func(r *clustercache.Resource) bool {
return resInfo(r).AppName == a.InstanceName(c.settingsMgr.GetNamespace())
@@ -630,7 +644,7 @@ func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*
func (c *liveStateCache) GetVersionsInfo(serverURL string) (string, []kube.APIResourceInfo, error) {
clusterInfo, err := c.getSyncedCluster(serverURL)
if err != nil {
- return "", nil, err
+ return "", nil, fmt.Errorf("failed to get cluster info for %q: %w", serverURL, err)
}
return clusterInfo.GetServerVersion(), clusterInfo.GetAPIResources(), nil
}
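Both hunks above replace a bare return err with fmt.Errorf and the %w verb. A small self-contained illustration of why that matters to callers (the names below are made up, not taken from this file): the added context appears in the message while the original error remains matchable.

package main

import (
	"errors"
	"fmt"
)

var errNotSynced = errors.New("cluster cache not synced")

// getSyncedClusterSketch stands in for a lookup that can fail.
func getSyncedClusterSketch(server string) (string, error) {
	return "", errNotSynced
}

func getVersionsInfoSketch(server string) (string, error) {
	v, err := getSyncedClusterSketch(server)
	if err != nil {
		// Wrapping with %w adds context without hiding the cause.
		return "", fmt.Errorf("failed to get cluster info for %q: %w", server, err)
	}
	return v, nil
}

func main() {
	_, err := getVersionsInfoSketch("https://example.cluster")
	fmt.Println(err)                        // failed to get cluster info for "https://example.cluster": cluster cache not synced
	fmt.Println(errors.Is(err, errNotSynced)) // true: the underlying error is still matchable
}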
@@ -775,12 +789,14 @@ func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *a
}

func (c *liveStateCache) handleDeleteEvent(clusterServer string) {
- c.lock.Lock()
- defer c.lock.Unlock()
+ c.lock.RLock()
cluster, ok := c.clusters[clusterServer]
+ c.lock.RUnlock()
if ok {
cluster.Invalidate()
+ c.lock.Lock()
delete(c.clusters, clusterServer)
+ c.lock.Unlock()
}
}
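The change above narrows the locking in handleDeleteEvent: a read lock only for the map lookup, no lock held during the potentially slow Invalidate call, and a write lock only around the map mutation. A minimal sketch of the same pattern with assumed names, not the controller's code:

package example

import "sync"

type clusterEntry struct{}

// Invalidate stands in for the expensive cache teardown.
func (e *clusterEntry) Invalidate() {}

type cacheRegistry struct {
	mu       sync.RWMutex
	clusters map[string]*clusterEntry
}

func (r *cacheRegistry) handleDelete(server string) {
	// Read lock only while looking the entry up.
	r.mu.RLock()
	entry, ok := r.clusters[server]
	r.mu.RUnlock()
	if !ok {
		return
	}
	// Invalidate outside any lock so other readers are not blocked.
	entry.Invalidate()
	// Write lock only around the map mutation.
	r.mu.Lock()
	delete(r.clusters, server)
	r.mu.Unlock()
}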
Some files were not shown because too many files have changed in this diff.