Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-21 10:08:47 +01:00)
Compare commits: 355 commits
The commit list in this comparison runs from 5bcd846fa1 through 4aaed135b8. Only the abbreviated SHA1s survive in this view (the Author and Date columns are empty), so the per-commit table is replaced here by the file changes below.
.github/ISSUE_TEMPLATE/release.md (new file, 32 lines)
@@ -0,0 +1,32 @@
---
name: Argo CD Release
about: Used by our Release Champion to track progress of a minor release
title: 'Argo CD Release vX.X'
labels: 'release'
assignees: ''
---

Target RC1 date: ___. __, ____
Target GA date: ___. __, ____

- [ ] Create new section in the [Release Planning doc](https://docs.google.com/document/d/1trJIomcgXcfvLw0aYnERrFWfPjQOfYMDJOCh1S8nMBc/edit?usp=sharing)
- [ ] Schedule a Release Planning meeting roughly two weeks before the scheduled Release freeze date by adding it to the community calendar (or delegate this task to someone with write access to the community calendar)
- [ ] Include Zoom link in the invite
- [ ] Post in #argo-cd and #argo-contributors one week before the meeting
- [ ] Post again one hour before the meeting
- [ ] At the meeting, remove issues/PRs from the project's column for that release which have not been “claimed” by at least one Approver (add it to the next column if Approver requests that)
- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can’t merge
- [ ] At least two days before RC1 date, draft RC blog post and submit it for review (or delegate this task)
- [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing)
- [ ] Create new release branch
- [ ] Add the release branch to ReadTheDocs
- [ ] Confirm that tweet and blog post are ready
- [ ] Trigger the release
- [ ] After the release is finished, publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements with lots of emojis announcing the release and requesting help testing
- [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate (or delegate this task to an Approver and coordinate timing)
- [ ] At release date, evaluate if any bugs justify delaying the release. If not, cut the release (or delegate this task to an Approver and coordinate timing)
- [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing)
- [ ] After the release, post in #argo-cd that the {current minor version minus 3} has reached EOL (example: https://cloud-native.slack.com/archives/C01TSERG0KZ/p1667336234059729)
- [ ] (For the next release champion) Review the [items scheduled for the next release](https://github.com/orgs/argoproj/projects/25). If any item does not have an assignee who can commit to finish the feature, move it to the next release.
- [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again.
.github/dependabot.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"
ignore:
- dependency-name: k8s.io/*

- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

- package-ecosystem: "npm"
directory: "/ui/"
schedule:
interval: "daily"
.github/workflows/ci-build.yaml (79 lines changed)
@@ -9,6 +9,7 @@ on:
pull_request:
branches:
- 'master'
- 'release-*'

env:
# Golang version to use across CI steps
@@ -27,9 +28,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -45,13 +46,13 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -69,13 +70,13 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@0ad9a0988b3973e851ab0a07adf248ec2e100376 # v3.3.1
with:
version: v1.46.2
args: --timeout 10m --exclude SA5011 --verbose
@@ -92,11 +93,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -116,13 +117,17 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
@@ -133,12 +138,12 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: test-results
path: test-results/
@@ -155,11 +160,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -179,13 +184,17 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
- name: Install all tools required for building & testing
run: |
make install-test-tools-local
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Setup git username and email
run: |
git config --global user.name "John Doe"
@@ -196,7 +205,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: race-results
path: test-results/
@@ -206,9 +215,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
@@ -232,6 +241,10 @@ jobs:
make install-codegen-tools-local
make install-go-tools-local
working-directory: /home/runner/go/src/github.com/argoproj/argo-cd
# We install kustomize in the dist directory
- name: Add dist to PATH
run: |
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH
- name: Run codegen
run: |
set -x
@@ -250,14 +263,14 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup NodeJS
uses: actions/setup-node@v1
uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
with:
node-version: '12.18.4'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@v1
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -287,12 +300,12 @@ jobs:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@v1
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -303,16 +316,16 @@ jobs:
run: |
mkdir -p test-results
- name: Get code coverage artifiact
uses: actions/download-artifact@v2
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: code-coverage
- name: Get test result artifact
uses: actions/download-artifact@v2
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@v1
uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud
@@ -366,9 +379,9 @@ jobs:
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
@@ -386,7 +399,7 @@ jobs:
sudo chown runner $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -412,9 +425,9 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.35.3-distroless
docker pull ghcr.io/dexidp/dex:v2.35.3
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.5-alpine
docker pull redis:7.0.8-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
@@ -442,7 +455,7 @@ jobs:
set -x
make test-e2e-local
- name: Upload e2e-server logs
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log
.github/workflows/codeql.yml (8 lines changed)
@@ -29,11 +29,11 @@ jobs:

steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
@@ -41,7 +41,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33

# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -55,4 +55,4 @@ jobs:
# make release

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
.github/workflows/image.yaml (38 lines changed)
@@ -23,37 +23,38 @@ jobs:
publish:
permissions:
contents: write # for git to push upgrade commit if not already deployed
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@master
- uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
path: src/github.com/argoproj/argo-cd

# get image tag
- run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8}
- run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
working-directory: ./src/github.com/argoproj/argo-cd
id: image

# login
- run: |
docker login ghcr.io --username $USERNAME --password $PASSWORD
docker login quay.io --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker login ghcr.io --username $USERNAME --password-stdin <<< "$PASSWORD"
docker login quay.io --username "$DOCKER_USERNAME" --password-stdin <<< "$DOCKER_TOKEN"
if: github.event_name == 'push'
env:
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.TOKEN }}
USERNAME: ${{ github.actor }}
PASSWORD: ${{ secrets.GITHUB_TOKEN }}
DOCKER_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}

# build
- uses: docker/setup-qemu-action@v2
- uses: docker/setup-buildx-action@v2
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- run: |
IMAGE_PLATFORMS=linux/amd64
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
@@ -61,20 +62,27 @@ jobs:
IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
fi
echo "Building image for platforms: $IMAGE_PLATFORMS"
docker buildx build --platform $IMAGE_PLATFORMS --push="${{ github.event_name == 'push' }}" \
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
docker buildx build --platform $IMAGE_PLATFORMS --sbom=false --provenance=false --push="${{ github.event_name == 'push' }}" \
-t ghcr.io/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }} \
-t quay.io/argoproj/argocd:latest .
working-directory: ./src/github.com/argoproj/argo-cd

# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@main
uses: sigstore/cosign-installer@c3667d99424e7e6047999fb6246c0da843953c65 # v3.0.1
with:
cosign-release: 'v1.13.0'
cosign-release: 'v1.13.1'

- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c

- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:latest)" >> $GITHUB_ENV

- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd:latest
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd@${{ env.IMAGE_DIGEST }}
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
@@ -88,7 +96,7 @@ jobs:
env:
TOKEN: ${{ secrets.TOKEN }}
- run: |
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }}
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }}
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push)
.github/workflows/release.yaml (44 lines changed)
@@ -12,7 +12,7 @@ on:
- "!release-v0*"

env:
GOLANG_VERSION: '1.18'
GOLANG_VERSION: '1.18'

permissions:
contents: read
@@ -43,7 +43,7 @@ jobs:
GIT_EMAIL: argoproj@gmail.com
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -147,7 +147,7 @@ jobs:
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV

- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 # v4.0.0
with:
go-version: ${{ env.GOLANG_VERSION }}

@@ -177,6 +177,10 @@ jobs:
run: |
set -ue
make install-codegen-tools-local

# We install kustomize in the dist directory
echo "/home/runner/work/argo-cd/argo-cd/dist" >> $GITHUB_PATH

make manifests-local VERSION=${TARGET_VERSION}
git diff
git commit manifests/ -m "Bump version to ${TARGET_VERSION}"
@@ -195,19 +199,19 @@ jobs:
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
run: |
set -ue
docker login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_TOKEN}"
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
# Remove the following when Docker Hub is gone
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
if: ${{ env.DRY_RUN != 'true' }}

- uses: docker/setup-qemu-action@v2
- uses: docker/setup-buildx-action@v2
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # v2.4.1
- name: Build and push Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --sbom=false --provenance=false --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
make release-cli
make checksums
chmod +x ./dist/argocd-linux-amd64
@@ -215,13 +219,20 @@ jobs:
if: ${{ env.DRY_RUN != 'true' }}

- name: Install cosign
uses: sigstore/cosign-installer@main
uses: sigstore/cosign-installer@c3667d99424e7e6047999fb6246c0da843953c65 # v3.0.1
with:
cosign-release: 'v1.13.0'
cosign-release: 'v1.13.1'

- name: Install crane to get digest of image
uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c

- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV

- name: Sign Argo CD container images and assets
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION}
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd@${{ env.IMAGE_DIGEST }}
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
@@ -232,7 +243,7 @@ jobs:

- name: Read release notes file
id: release-notes
uses: juliangruber/read-file-action@v1
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
with:
path: ${{ env.RELEASE_NOTES }}

@@ -243,7 +254,7 @@ jobs:
git push origin ${RELEASE_TAG}

- name: Dry run GitHub release
uses: actions/create-release@v1
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
id: create_release
@@ -264,7 +275,7 @@ jobs:
SIGS_BOM_VERSION: v0.2.1
# comma delimited list of project relative folders to inspect for package
# managers (gomod, yarn, npm).
PROJECT_FOLDERS: ".,./ui"
PROJECT_FOLDERS: ".,./ui"
# full qualified name of the docker image to be inspected
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
run: |
@@ -295,7 +306,7 @@ jobs:
if: ${{ env.DRY_RUN != 'true' }}

- name: Create GitHub release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -303,7 +314,6 @@ jobs:
tag_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
generate_release_notes: true
body: ${{ steps.release-notes.outputs.content }} # Pre-pended to the generated notes
files: |
dist/argocd-*
@@ -314,7 +324,7 @@ jobs:
- name: Update homebrew formula
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
uses: dawidd6/action-homebrew-bump-formula@v3
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
with:
token: ${{env.HOMEBREW_TOKEN}}
formula: argocd
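The release workflow above signs the image digest and the checksums file, and publishes `argocd-cosign.pub` alongside the release assets. As a minimal illustrative sketch (not part of the diff), a downstream workflow step could verify the published checksums signature with the cosign v1.x CLI; the file names simply mirror the asset names the workflow produces, and the step itself is hypothetical:

```yaml
# Illustrative only - a consumer-side verification step, assuming the release
# assets (checksums file, .sig, and argocd-cosign.pub) have been downloaded
# into the working directory and TARGET_VERSION is set in the environment.
- name: Verify Argo CD checksums signature (sketch)
  run: |
    cosign verify-blob \
      --key argocd-cosign.pub \
      --signature argocd-${TARGET_VERSION}-checksums.sig \
      argocd-${TARGET_VERSION}-checksums.txt
```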
.github/workflows/update-snyk.yaml (2 lines changed)
@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Build reports
Makefile (4 lines changed)
@@ -512,7 +512,7 @@ build-docs-local:

.PHONY: build-docs
build-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} build
docker run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build'

.PHONY: serve-docs-local
serve-docs-local:
@@ -520,7 +520,7 @@ serve-docs-local:

.PHONY: serve-docs
serve-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} serve -a 0.0.0.0:8000
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}/site:/site -w /site --entrypoint "" ${MKDOCS_DOCKER_IMAGE} python3 -m http.server --bind 0.0.0.0 8000


# Verify that kubectl can connect to your K8s cluster from Docker
OWNERS (2 lines changed)
@@ -27,3 +27,5 @@ reviewers:
- wanghong230
- ciiay
- saumeya
- zachaller
- 34fathombelow
USERS.md (24 lines changed)
@@ -11,9 +11,11 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Adevinta](https://www.adevinta.com/)
1. [Adfinis](https://adfinis.com)
1. [Adventure](https://jp.adventurekk.com/)
1. [AirQo](https://airqo.net/)
1. [Akuity](https://akuity.io/)
1. [Alibaba Group](https://www.alibabagroup.com/)
1. [Allianz Direct](https://www.allianzdirect.de/)
1. [Amadeus IT Group](https://amadeus.com/)
1. [Ambassador Labs](https://www.getambassador.io/)
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [Ant Group](https://www.antgroup.com/)
@@ -35,12 +37,14 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Camptocamp](https://camptocamp.com)
1. [Capital One](https://www.capitalone.com)
1. [CARFAX](https://www.carfax.com)
1. [CARFAX Europe](https://www.carfax.eu)
1. [Casavo](https://casavo.com)
1. [Celonis](https://www.celonis.com/)
1. [CERN](https://home.cern/)
1. [Chargetrip](https://chargetrip.com)
1. [Chime](https://www.chime.com)
1. [Cisco ET&I](https://eti.cisco.com/)
1. [Cloud Scale](https://cloudscaleinc.com/)
1. [Cobalt](https://www.cobalt.io/)
1. [Codefresh](https://www.codefresh.io/)
1. [Codility](https://www.codility.com/)
@@ -56,6 +60,8 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Deutsche Telekom AG](https://telekom.com)
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [Divistant](https://divistant.com)
1. [Doximity](https://www.doximity.com/)
1. [EDF Renewables](https://www.edf-re.com/)
1. [edX](https://edx.org)
1. [Elastic](https://elastic.co/)
@@ -65,6 +71,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [END.](https://www.endclothing.com/)
1. [Energisme](https://energisme.com/)
1. [enigmo](https://enigmo.co.jp/)
1. [Envoy](https://envoy.com/)
1. [Faro](https://www.faro.com/)
1. [Fave](https://myfave.com)
1. [Flip](https://flip.id)
@@ -75,7 +82,8 @@ Currently, the following organizations are **officially** using Argo CD:
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
1. [Garner](https://www.garnercorp.com)
1. [Generali Deutschland AG](https://www.generali.de/)
2. [Gepardec](https://gepardec.com/)
1. [Gepardec](https://gepardec.com/)
1. [GetYourGuide](https://www.getyourguide.com/)
1. [Gitpod](https://www.gitpod.io)
1. [Gllue](https://gllue.com)
1. [gloat](https://gloat.com/)
@@ -97,7 +105,9 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Ibotta](https://home.ibotta.com)
1. [IITS-Consulting](https://iits-consulting.de)
1. [imaware](https://imaware.health)
1. [Indeed](https://indeed.com)
1. [Index Exchange](https://www.indexexchange.com/)
1. [Info Support](https://www.infosupport.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Intuit](https://www.intuit.com/)
1. [Joblift](https://joblift.com/)
@@ -116,6 +126,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Kurly](https://www.kurly.com/)
1. [LexisNexis](https://www.lexisnexis.com/)
1. [Lian Chu Securities](https://lczq.com)
1. [Liatrio](https://www.liatrio.com)
1. [Lightricks](https://www.lightricks.com/)
1. [LINE](https://linecorp.com/en/)
1. [Lytt](https://www.lytt.co/)
@@ -128,6 +139,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Max Kelsen](https://www.maxkelsen.com/)
1. [MeDirect](https://medirect.com.mt/)
1. [Meican](https://meican.com/)
1. [Mercedes-Benz Tech Innovation](https://www.mercedes-benz-techinnovation.com/)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [MindSpore](https://mindspore.cn)
1. [Mirantis](https://mirantis.com/)
@@ -142,6 +154,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Nextdoor](https://nextdoor.com/)
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
1. [Nitro](https://gonitro.com)
1. [Objective](https://www.objective.com.br/)
1. [OCCMundial](https://occ.com.mx)
1. [Octadesk](https://octadesk.com)
1. [omegaUp](https://omegaUp.com)
@@ -157,11 +170,14 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Packlink](https://www.packlink.com/)
1. [Pandosearch](https://www.pandosearch.com/en/home)
1. [PagerDuty](https://www.pagerduty.com/)
1. [Patreon](https://www.patreon.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [Pigment](https://www.gopigment.com/)
1. [Pipefy](https://www.pipefy.com/)
1. [Pismo](https://pismo.io/)
1. [Polarpoint.io](https://polarpoint.io)
1. [PostFinance](https://github.com/postfinance)
1. [Preferred Networks](https://preferred.jp/en/)
1. [Productboard](https://www.productboard.com/)
1. [Prudential](https://prudential.com.sg)
@@ -172,6 +188,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [RapidAPI](https://www.rapidapi.com/)
1. [Recreation.gov](https://www.recreation.gov/)
1. [Red Hat](https://www.redhat.com/)
1. [Redpill Linpro](https://www.redpill-linpro.com/)
1. [reev.com](https://www.reev.com/)
1. [RightRev](https://rightrev.com/)
1. [Rise](https://www.risecard.eu/)
@@ -182,6 +199,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Saloodo! GmbH](https://www.saloodo.com)
1. [Sap Labs](http://sap.com)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [SI Analytics](https://si-analytics.ai)
1. [Skit](https://skit.ai/)
1. [Skyscanner](https://www.skyscanner.net/)
1. [Smilee.io](https://smilee.io)
@@ -213,6 +231,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Toss](https://toss.im/en)
1. [Trendyol](https://www.trendyol.com/)
1. [tru.ID](https://tru.id)
1. [Trusting Social](https://trustingsocial.com/)
1. [Twilio SendGrid](https://sendgrid.com)
1. [tZERO](https://www.tzero.com/)
1. [UBIO](https://ub.io/)
@@ -221,9 +240,11 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Unifonic Inc](https://www.unifonic.com/)
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
1. [Viaduct](https://www.viaduct.ai/)
1. [Vinted](https://vinted.com/)
1. [Virtuo](https://www.govirtuo.com/)
1. [VISITS Technologies](https://visits.world/en)
1. [Volvo Cars](https://www.volvocars.com/)
1. [Voyager Digital](https://www.investvoyager.com/)
1. [VSHN - The DevOps Company](https://vshn.ch/)
1. [Walkbase](https://www.walkbase.com/)
1. [Webstores](https://www.webstores.nl)
@@ -232,6 +253,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [WeMo Scooter](https://www.wemoscooter.com/)
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
1. [Witick](https://witick.io/)
1. [Wolffun Game](https://www.wolffungame.com/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [WSpot](https://www.wspot.com.br/)
@@ -25,6 +25,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -69,6 +70,8 @@ type ApplicationSetReconciler struct {
|
||||
KubeClientset kubernetes.Interface
|
||||
utils.Policy
|
||||
utils.Renderer
|
||||
|
||||
EnableProgressiveSyncs bool
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -134,6 +137,27 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
|
||||
}
|
||||
|
||||
// appMap is a name->app collection of Applications in this ApplicationSet.
|
||||
appMap := map[string]argov1alpha1.Application{}
|
||||
// appSyncMap tracks which apps will be synced during this reconciliation.
|
||||
appSyncMap := map[string]bool{}
|
||||
|
||||
if r.EnableProgressiveSyncs && applicationSetInfo.Spec.Strategy != nil {
|
||||
applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
|
||||
}
|
||||
|
||||
for _, app := range applications {
|
||||
appMap[app.Name] = app
|
||||
}
|
||||
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, applicationSetInfo, applications, desiredApplications, appMap)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var validApps []argov1alpha1.Application
|
||||
for i := range desiredApplications {
|
||||
if validateErrors[i] == nil {
|
||||
@@ -162,6 +186,26 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
)
|
||||
}
|
||||
|
||||
if r.EnableProgressiveSyncs {
|
||||
// trigger appropriate application syncs if RollingSync strategy is enabled
|
||||
if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") {
|
||||
validApps, err = r.syncValidApplications(ctx, &applicationSetInfo, appSyncMap, appMap, validApps)
|
||||
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argov1alpha1.ApplicationSetReasonSyncApplicationError,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r.Policy.Update() {
|
||||
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
@@ -528,6 +572,11 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
|
||||
found.Spec = generatedApp.Spec
|
||||
|
||||
// allow setting the Operation field to trigger a sync operation on an Application
|
||||
if generatedApp.Operation != nil {
|
||||
found.Operation = generatedApp.Operation
|
||||
}
|
||||
|
||||
// Preserve specially treated argo cd annotations:
|
||||
// * https://github.com/argoproj/applicationset/issues/180
|
||||
// * https://github.com/argoproj/argo-cd/issues/10500
|
||||
@@ -726,4 +775,541 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
|
||||
appDependencyList, appStepMap, err := r.buildAppDependencyList(ctx, appset, desiredApplications)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build app dependency list: %w", err)
|
||||
}
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatus(ctx, &appset, applications, appStepMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
|
||||
}
|
||||
|
||||
log.Infof("ApplicationSet %v step list:", appset.Name)
|
||||
for i, step := range appDependencyList {
|
||||
log.Infof("step %v: %+v", i+1, step)
|
||||
}
|
||||
|
||||
appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build app sync map: %w", err)
|
||||
}
|
||||
|
||||
log.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, &appset, appSyncMap, appStepMap, appMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
|
||||
}
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusConditions(ctx, &appset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status conditions: %w", err)
|
||||
}
|
||||
|
||||
return appSyncMap, nil
|
||||
}
|
||||
|
||||
// this list tracks which Applications belong to each RollingUpdate step
|
||||
func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {
|
||||
|
||||
if applicationSet.Spec.Strategy == nil || applicationSet.Spec.Strategy.Type == "" || applicationSet.Spec.Strategy.Type == "AllAtOnce" {
|
||||
return [][]string{}, map[string]int{}, nil
|
||||
}
|
||||
|
||||
steps := []argov1alpha1.ApplicationSetRolloutStep{}
|
||||
if progressiveSyncsStrategyEnabled(&applicationSet, "RollingSync") {
|
||||
steps = applicationSet.Spec.Strategy.RollingSync.Steps
|
||||
}
|
||||
|
||||
appDependencyList := make([][]string, 0)
|
||||
for range steps {
|
||||
appDependencyList = append(appDependencyList, make([]string, 0))
|
||||
}
|
||||
|
||||
appStepMap := map[string]int{}
|
||||
|
||||
// use applicationLabelSelectors to filter generated Applications into steps and status by name
|
||||
for _, app := range applications {
|
||||
for i, step := range steps {
|
||||
|
||||
selected := true // default to true, assuming the current Application is a match for the given step matchExpression
|
||||
|
||||
allNotInMatched := true // needed to support correct AND behavior between multiple NotIn MatchExpressions
|
||||
notInUsed := false // since we default to allNotInMatched == true, track whether a NotIn expression was actually used
|
||||
|
||||
for _, matchExpression := range step.MatchExpressions {
|
||||
|
||||
if matchExpression.Operator == "In" {
|
||||
if val, ok := app.Labels[matchExpression.Key]; ok {
|
||||
valueMatched := labelMatchedExpression(val, matchExpression)
|
||||
|
||||
if !valueMatched { // none of the matchExpression values was a match with the Application'ss labels
|
||||
selected = false
|
||||
break
|
||||
}
|
||||
} else {
|
||||
selected = false // no matching label key with In means this Application will not be included in the current step
|
||||
break
|
||||
}
|
||||
} else if matchExpression.Operator == "NotIn" {
|
||||
notInUsed = true // a NotIn selector was used in this matchExpression
|
||||
if val, ok := app.Labels[matchExpression.Key]; ok {
|
||||
valueMatched := labelMatchedExpression(val, matchExpression)
|
||||
|
||||
if !valueMatched { // none of the matchExpression values was a match with the Application's labels
|
||||
allNotInMatched = false
|
||||
}
|
||||
} else {
|
||||
allNotInMatched = false // no matching label key with NotIn means this Application may still be included in the current step
|
||||
}
|
||||
} else { // handle invalid operator selection
|
||||
log.Warnf("skipping AppSet rollingUpdate step Application selection for %q, invalid matchExpression operator provided: %q ", applicationSet.Name, matchExpression.Operator)
|
||||
selected = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if notInUsed && allNotInMatched { // check if all NotIn Expressions matched, if so exclude this Application
|
||||
selected = false
|
||||
}
|
||||
|
||||
if selected {
|
||||
appDependencyList[i] = append(appDependencyList[i], app.Name)
|
||||
if val, ok := appStepMap[app.Name]; ok {
|
||||
log.Warnf("AppSet '%v' has a invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
|
||||
} else {
|
||||
appStepMap[app.Name] = i
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return appDependencyList, appStepMap, nil
|
||||
}
|
||||
|
||||
func labelMatchedExpression(val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
|
||||
valueMatched := false
|
||||
for _, value := range matchExpression.Values {
|
||||
if val == value {
|
||||
valueMatched = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return valueMatched
|
||||
}
|
||||
|
||||
// this map is used to determine which stage of Applications are ready to be updated in the reconciler loop
|
||||
func (r *ApplicationSetReconciler) buildAppSyncMap(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
appSyncMap := map[string]bool{}
|
||||
syncEnabled := true
|
||||
|
||||
// healthy stages and the first non-healthy stage should have sync enabled
|
||||
// every stage after should have sync disabled
|
||||
|
||||
for i := range appDependencyList {
|
||||
// set the syncEnabled boolean for every Application in the current step
|
||||
for _, appName := range appDependencyList[i] {
|
||||
appSyncMap[appName] = syncEnabled
|
||||
}
|
||||
|
||||
// detect if we need to halt before progressing to the next step
|
||||
for _, appName := range appDependencyList[i] {
|
||||
|
||||
idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appName)
|
||||
if idx == -1 {
|
||||
// no Application status found, likely because the Application is being newly created
|
||||
syncEnabled = false
|
||||
break
|
||||
}
|
||||
|
||||
appStatus := applicationSet.Status.ApplicationStatus[idx]
|
||||
|
||||
if app, ok := appMap[appName]; ok {
|
||||
|
||||
syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus)
|
||||
if !syncEnabled {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
// application name not found in the list of applications managed by this ApplicationSet, maybe because it's being deleted
|
||||
syncEnabled = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return appSyncMap, nil
|
||||
}
|
||||
|
||||
func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool {
|
||||
|
||||
if progressiveSyncsStrategyEnabled(appset, "RollingSync") {
|
||||
// we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes
|
||||
return isApplicationHealthy(app) && appStatus.Status == "Healthy"
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func progressiveSyncsStrategyEnabled(appset *argov1alpha1.ApplicationSet, strategyType string) bool {
|
||||
if appset.Spec.Strategy == nil || appset.Spec.Strategy.Type != strategyType {
|
||||
return false
|
||||
}
|
||||
|
||||
if strategyType == "RollingSync" && appset.Spec.Strategy.RollingSync == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func isApplicationHealthy(app argov1alpha1.Application) bool {
|
||||
healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)
|
||||
|
||||
if healthStatusString == "Healthy" && syncStatusString != "OutOfSync" && (operationPhaseString == "Succeeded" || operationPhaseString == "") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func statusStrings(app argov1alpha1.Application) (string, string, string) {
|
||||
healthStatusString := string(app.Status.Health.Status)
|
||||
syncStatusString := string(app.Status.Sync.Status)
|
||||
operationPhaseString := ""
|
||||
if app.Status.OperationState != nil {
|
||||
operationPhaseString = string(app.Status.OperationState.Phase)
|
||||
}
|
||||
|
||||
return healthStatusString, syncStatusString, operationPhaseString
|
||||
}

// check each Application's status and promote Applications to the next status if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {

	now := metav1.Now()
	appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))

	for _, app := range applications {

		healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)

		idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)

		currentAppStatus := argov1alpha1.ApplicationSetApplicationStatus{}

		if idx == -1 {
			// AppStatus not found, set default status of "Waiting"
			currentAppStatus = argov1alpha1.ApplicationSetApplicationStatus{
				Application:        app.Name,
				LastTransitionTime: &now,
				Message:            "No Application status found, defaulting status to Waiting.",
				Status:             "Waiting",
				Step:               fmt.Sprint(appStepMap[app.Name] + 1),
			}
		} else {
			// we have an existing AppStatus
			currentAppStatus = applicationSet.Status.ApplicationStatus[idx]
		}

		appOutdated := false
		if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
			appOutdated = syncStatusString == "OutOfSync"
		}

		if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
			log.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
			currentAppStatus.LastTransitionTime = &now
			currentAppStatus.Status = "Waiting"
			currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
			currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
		}

		if currentAppStatus.Status == "Pending" {
			if operationPhaseString == "Succeeded" && app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
				log.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
				currentAppStatus.LastTransitionTime = &now
				currentAppStatus.Status = "Progressing"
				currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
				currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
			} else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
				log.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
				currentAppStatus.LastTransitionTime = &now
				currentAppStatus.Status = "Progressing"
				currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
				currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
			}
		}

		if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
			log.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
			currentAppStatus.LastTransitionTime = &now
			currentAppStatus.Status = healthStatusString
			currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
			currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
		}

		if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
			log.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
			currentAppStatus.LastTransitionTime = &now
			currentAppStatus.Status = healthStatusString
			currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
			currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
		}

		appStatuses = append(appStatuses, currentAppStatus)
	}

	err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
	if err != nil {
		return nil, fmt.Errorf("failed to set AppSet application statuses: %w", err)
	}

	return appStatuses, nil
}
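
Taken together with the promotion step below, the transitions implemented here form a small state machine (an editor's summary of the code above and below, not part of this commit):

	// Waiting     -> Pending      when the step's maxUpdate budget allows it (updateApplicationSetApplicationStatusProgress below)
	// Pending     -> Progressing  when the triggered sync succeeds, or the Application starts progressing
	// Progressing -> Healthy      when the Application becomes healthy and in sync again
	// Waiting     -> Healthy      immediately, if the Application is already synced and healthy
	// Healthy/Progressing -> Waiting  when the Application goes OutOfSync under RollingSync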

// check Applications that are in Waiting status and promote them to Pending if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
	now := metav1.Now()

	appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))

	// step progress is only tracked when a progressive sync strategy (i.e. not AllAtOnce) is configured
	if applicationSet.Spec.Strategy != nil && applicationSet.Spec.Strategy.Type != "" && applicationSet.Spec.Strategy.Type != "AllAtOnce" {
		updateCountMap := []int{}
		totalCountMap := []int{}

		length := 0
		if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
			length = len(applicationSet.Spec.Strategy.RollingSync.Steps)
		}
		for s := 0; s < length; s++ {
			updateCountMap = append(updateCountMap, 0)
			totalCountMap = append(totalCountMap, 0)
		}

		// populate updateCountMap with counts of existing Pending and Progressing Applications
		for _, appStatus := range applicationSet.Status.ApplicationStatus {
			totalCountMap[appStepMap[appStatus.Application]] += 1

			if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
				if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
					updateCountMap[appStepMap[appStatus.Application]] += 1
				}
			}
		}

		for _, appStatus := range applicationSet.Status.ApplicationStatus {

			maxUpdateAllowed := true
			maxUpdate := &intstr.IntOrString{}
			if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
				maxUpdate = applicationSet.Spec.Strategy.RollingSync.Steps[appStepMap[appStatus.Application]].MaxUpdate
			}

			// by default allow all applications to update if maxUpdate is unset
			if maxUpdate != nil {
				maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
				if err != nil {
					log.Warnf("AppSet '%v' has an invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
				}

				// ensure that percentage values greater than 0% always result in at least 1 Application being selected
				if maxUpdate.Type == intstr.String && maxUpdate.StrVal != "0%" && maxUpdateVal < 1 {
					maxUpdateVal = 1
				}

				if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
					maxUpdateAllowed = false
					log.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
				}

			}

			if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
				log.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
				appStatus.LastTransitionTime = &now
				appStatus.Status = "Pending"
				appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
				appStatus.Step = fmt.Sprint(appStepMap[appStatus.Application] + 1)

				updateCountMap[appStepMap[appStatus.Application]] += 1
			}

			appStatuses = append(appStatuses, appStatus)
		}
	}

	err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
	if err != nil {
		return nil, fmt.Errorf("failed to set AppSet app status: %w", err)
	}

	return appStatuses, nil
}
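
To make the maxUpdate arithmetic above concrete, a small sketch (an editor's illustration, not part of this commit) using the same intstr helper:

func exampleMaxUpdateScaling() int {
	maxUpdate := intstr.FromString("10%")
	totalInStep := 5 // Applications assigned to this RollingSync step

	// round down, exactly as the controller does above
	maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(&maxUpdate, totalInStep, false)
	if err != nil {
		// the controller above only logs a warning in this case
		return 0
	}

	// 10% of 5 rounds down to 0, so the controller's floor bumps any
	// non-zero percentage back up to at least one Application
	if maxUpdate.Type == intstr.String && maxUpdate.StrVal != "0%" && maxUpdateVal < 1 {
		maxUpdateVal = 1
	}
	return maxUpdateVal // 1
}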

func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditions(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet) ([]argov1alpha1.ApplicationSetCondition, error) {

	appSetProgressing := false
	for _, appStatus := range applicationSet.Status.ApplicationStatus {
		if appStatus.Status != "Healthy" {
			appSetProgressing = true
			break
		}
	}

	appSetConditionProgressing := false
	for _, appSetCondition := range applicationSet.Status.Conditions {
		if appSetCondition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing && appSetCondition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
			appSetConditionProgressing = true
			break
		}
	}

	if appSetProgressing && !appSetConditionProgressing {
		_ = r.setApplicationSetStatusCondition(ctx,
			applicationSet,
			argov1alpha1.ApplicationSetCondition{
				Type:    argov1alpha1.ApplicationSetConditionRolloutProgressing,
				Message: "ApplicationSet Rollout started",
				Reason:  argov1alpha1.ApplicationSetReasonApplicationSetModified,
				Status:  argov1alpha1.ApplicationSetConditionStatusTrue,
			}, false,
		)
	} else if !appSetProgressing && appSetConditionProgressing {
		_ = r.setApplicationSetStatusCondition(ctx,
			applicationSet,
			argov1alpha1.ApplicationSetCondition{
				Type:    argov1alpha1.ApplicationSetConditionRolloutProgressing,
				Message: "ApplicationSet Rollout complete",
				Reason:  argov1alpha1.ApplicationSetReasonApplicationSetRolloutComplete,
				Status:  argov1alpha1.ApplicationSetConditionStatusFalse,
			}, false,
		)
	}

	return applicationSet.Status.Conditions, nil
}

func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplicationStatus, application string) int {
	for i := range appStatuses {
		if appStatuses[i].Application == application {
			return i
		}
	}
	return -1
}

// setAppSetApplicationStatus updates the ApplicationSet's status field
// with any new/changed Application statuses.
func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
	needToUpdateStatus := false
	for i := range applicationStatuses {
		appStatus := applicationStatuses[i]
		idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appStatus.Application)
		if idx == -1 {
			needToUpdateStatus = true
			break
		}
		currentStatus := applicationSet.Status.ApplicationStatus[idx]
		if currentStatus.Message != appStatus.Message || currentStatus.Status != appStatus.Status {
			needToUpdateStatus = true
			break
		}
	}

	if needToUpdateStatus {
		// fetch updated Application Set object before updating it
		namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
		if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
			if client.IgnoreNotFound(err) != nil {
				return nil
			}
			return fmt.Errorf("error fetching updated application set: %v", err)
		}

		for i := range applicationStatuses {
			applicationSet.Status.SetApplicationStatus(applicationStatuses[i])
		}

		// Update the newly fetched object with new set of ApplicationStatus
		err := r.Client.Status().Update(ctx, applicationSet)
		if err != nil {

			log.Errorf("unable to set application set status: %v", err)
			return fmt.Errorf("unable to set application set status: %v", err)
		}

		if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
			if client.IgnoreNotFound(err) != nil {
				return nil
			}
			return fmt.Errorf("error fetching updated application set: %v", err)
		}
	}

	return nil
}

func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
	rolloutApps := []argov1alpha1.Application{}
	for i := range validApps {
		pruneEnabled := false

		// ensure that Applications generated with RollingSync do not have an automated sync policy, since the AppSet controller will handle triggering the sync operation instead
		if validApps[i].Spec.SyncPolicy != nil && validApps[i].Spec.SyncPolicy.Automated != nil {
			pruneEnabled = validApps[i].Spec.SyncPolicy.Automated.Prune
			validApps[i].Spec.SyncPolicy.Automated = nil
		}

		appSetStatusPending := false
		idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, validApps[i].Name)
		if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == "Pending" {
			// only trigger a sync for Applications that are in Pending status, since this is governed by maxUpdate
			appSetStatusPending = true
		}

		// check appSyncMap to determine which Applications are ready to be updated and which should be skipped
		if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
			log.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
			validApps[i], _ = syncApplication(validApps[i], pruneEnabled)
		}
		rolloutApps = append(rolloutApps, validApps[i])
	}
	return rolloutApps, nil
}

// used by the RollingSync Progressive Sync strategy to trigger a sync of a particular Application resource
func syncApplication(application argov1alpha1.Application, prune bool) (argov1alpha1.Application, error) {

	operation := argov1alpha1.Operation{
		InitiatedBy: argov1alpha1.OperationInitiator{
			Username:  "applicationset-controller",
			Automated: true,
		},
		Info: []*argov1alpha1.Info{
			{
				Name:  "Reason",
				Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
			},
		},
		Sync: &argov1alpha1.SyncOperation{},
	}

	if application.Spec.SyncPolicy != nil {
		if application.Spec.SyncPolicy.Retry != nil {
			operation.Retry = *application.Spec.SyncPolicy.Retry
		}
		if application.Spec.SyncPolicy.SyncOptions != nil {
			operation.Sync.SyncOptions = application.Spec.SyncPolicy.SyncOptions
		}
		operation.Sync.Prune = prune
	}
	application.Operation = &operation

	return application, nil
}
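
A brief usage sketch (an editor's illustration, not part of this commit) showing what syncValidApplications gets back from this helper:

func exampleTriggerSync(app argov1alpha1.Application) argov1alpha1.Application {
	// prune mirrors whatever the removed automated sync policy had configured
	synced, _ := syncApplication(app, true)
	// synced.Operation is now non-nil and marked as initiated by
	// "applicationset-controller"; retry, sync options and prune are copied
	// over only when app.Spec.SyncPolicy is set.
	return synced
}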

var _ handler.EventHandler = &clusterSecretEventHandler{}

File diff suppressed because it is too large

applicationset/controllers/requeue_after_test.go — 179 lines (normal file)
@@ -0,0 +1,179 @@
package controllers

import (
	"context"
	"testing"
	"time"

	"github.com/argoproj/argo-cd/v2/applicationset/generators"
	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	dynfake "k8s.io/client-go/dynamic/fake"
	kubefake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestRequeueAfter(t *testing.T) {
	mockServer := argoCDServiceMock{}
	ctx := context.Background()
	scheme := runtime.NewScheme()
	err := argov1alpha1.AddToScheme(scheme)
	assert.Nil(t, err)
	gvrToListKind := map[schema.GroupVersionResource]string{{
		Group:    "mallard.io",
		Version:  "v1",
		Resource: "ducks",
	}: "DuckList"}
	appClientset := kubefake.NewSimpleClientset()
	k8sClient := fake.NewClientBuilder().Build()
	duckType := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "v2quack",
			"kind":       "Duck",
			"metadata": map[string]interface{}{
				"name":      "mightyduck",
				"namespace": "namespace",
				"labels":    map[string]interface{}{"duck": "all-species"},
			},
			"status": map[string]interface{}{
				"decisions": []interface{}{
					map[string]interface{}{
						"clusterName": "staging-01",
					},
					map[string]interface{}{
						"clusterName": "production-01",
					},
				},
			},
		},
	}
	fakeDynClient := dynfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind, duckType)

	terminalGenerators := map[string]generators.Generator{
		"List":                    generators.NewListGenerator(),
		"Clusters":                generators.NewClusterGenerator(k8sClient, ctx, appClientset, "argocd"),
		"Git":                     generators.NewGitGenerator(mockServer),
		"SCMProvider":             generators.NewSCMProviderGenerator(fake.NewClientBuilder().WithObjects(&corev1.Secret{}).Build(), generators.SCMAuthProviders{}),
		"ClusterDecisionResource": generators.NewDuckTypeGenerator(ctx, fakeDynClient, appClientset, "argocd"),
		"PullRequest":             generators.NewPullRequestGenerator(k8sClient, generators.SCMAuthProviders{}),
	}

	nestedGenerators := map[string]generators.Generator{
		"List":                    terminalGenerators["List"],
		"Clusters":                terminalGenerators["Clusters"],
		"Git":                     terminalGenerators["Git"],
		"SCMProvider":             terminalGenerators["SCMProvider"],
		"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
		"PullRequest":             terminalGenerators["PullRequest"],
		"Matrix":                  generators.NewMatrixGenerator(terminalGenerators),
		"Merge":                   generators.NewMergeGenerator(terminalGenerators),
	}

	topLevelGenerators := map[string]generators.Generator{
		"List":                    terminalGenerators["List"],
		"Clusters":                terminalGenerators["Clusters"],
		"Git":                     terminalGenerators["Git"],
		"SCMProvider":             terminalGenerators["SCMProvider"],
		"ClusterDecisionResource": terminalGenerators["ClusterDecisionResource"],
		"PullRequest":             terminalGenerators["PullRequest"],
		"Matrix":                  generators.NewMatrixGenerator(nestedGenerators),
		"Merge":                   generators.NewMergeGenerator(nestedGenerators),
	}

	client := fake.NewClientBuilder().WithScheme(scheme).Build()
	r := ApplicationSetReconciler{
		Client:     client,
		Scheme:     scheme,
		Recorder:   record.NewFakeRecorder(0),
		Generators: topLevelGenerators,
	}

	type args struct {
		appset *argov1alpha1.ApplicationSet
	}
	tests := []struct {
		name    string
		args    args
		want    time.Duration
		wantErr assert.ErrorAssertionFunc
	}{
		{name: "Cluster", args: args{appset: &argov1alpha1.ApplicationSet{
			Spec: argov1alpha1.ApplicationSetSpec{
				Generators: []argov1alpha1.ApplicationSetGenerator{{Clusters: &argov1alpha1.ClusterGenerator{}}},
			},
		}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
		{name: "ClusterMergeNested", args: args{&argov1alpha1.ApplicationSet{
			Spec: argov1alpha1.ApplicationSetSpec{
				Generators: []argov1alpha1.ApplicationSetGenerator{
					{Clusters: &argov1alpha1.ClusterGenerator{}},
					{Merge: &argov1alpha1.MergeGenerator{
						Generators: []argov1alpha1.ApplicationSetNestedGenerator{
							{
								Clusters: &argov1alpha1.ClusterGenerator{},
								Git:      &argov1alpha1.GitGenerator{},
							},
						},
					}},
				},
			},
		}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
		{name: "ClusterMatrixNested", args: args{&argov1alpha1.ApplicationSet{
			Spec: argov1alpha1.ApplicationSetSpec{
				Generators: []argov1alpha1.ApplicationSetGenerator{
					{Clusters: &argov1alpha1.ClusterGenerator{}},
					{Matrix: &argov1alpha1.MatrixGenerator{
						Generators: []argov1alpha1.ApplicationSetNestedGenerator{
							{
								Clusters: &argov1alpha1.ClusterGenerator{},
								Git:      &argov1alpha1.GitGenerator{},
							},
						},
					}},
				},
			},
		}}, want: generators.DefaultRequeueAfterSeconds, wantErr: assert.NoError},
		{name: "ListGenerator", args: args{appset: &argov1alpha1.ApplicationSet{
			Spec: argov1alpha1.ApplicationSetSpec{
				Generators: []argov1alpha1.ApplicationSetGenerator{{List: &argov1alpha1.ListGenerator{}}},
			},
		}}, want: generators.NoRequeueAfter, wantErr: assert.NoError},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equalf(t, tt.want, r.getMinRequeueAfter(tt.args.appset), "getMinRequeueAfter(%v)", tt.args.appset)
		})
	}
}

type argoCDServiceMock struct {
	mock *mock.Mock
}

func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
	args := a.mock.Called(ctx, repoURL, revision)

	return args.Get(0).([]string), args.Error(1)
}

func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
	args := a.mock.Called(ctx, repoURL, revision, pattern)

	return args.Get(0).(map[string][]byte), args.Error(1)
}

func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
	args := a.mock.Called(ctx, repoURL, revision, path)

	return args.Get(0).([]byte), args.Error(1)
}

func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
	args := a.mock.Called(ctx, repoURL, revision)
	return args.Get(0).([]string), args.Error(1)
}

@@ -23,6 +23,8 @@ spec:
  template:
    metadata:
      name: 'myapp-{{ .branch }}-{{ .number }}'
      labels:
        key1: '{{ index .labels 0 }}'
    spec:
      source:
        repoURL: 'https://github.com/myorg/myrepo.git'

@@ -51,6 +51,8 @@ func NewClusterGenerator(c client.Client, ctx context.Context, clientset kuberne
	return g
}

// GetRequeueAfter never requeues the cluster generator because the `clusterSecretEventHandler` will requeue the appsets
// when the cluster secrets change
func (g *ClusterGenerator) GetRequeueAfter(appSetGenerator *argoappsetv1alpha1.ApplicationSetGenerator) time.Duration {
	return NoRequeueAfter
}

@@ -2,7 +2,6 @@ package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/applicationset/utils"
|
||||
@@ -25,7 +24,7 @@ type TransformResult struct {
|
||||
Template argoprojiov1alpha1.ApplicationSetTemplate
|
||||
}
|
||||
|
||||
//Transform a spec generator to list of paramSets and a template
|
||||
// Transform a spec generator to list of paramSets and a template
|
||||
func Transform(requestedGenerator argoprojiov1alpha1.ApplicationSetGenerator, allGenerators map[string]Generator, baseTemplate argoprojiov1alpha1.ApplicationSetTemplate, appSet *argoprojiov1alpha1.ApplicationSet, genParams map[string]interface{}) ([]TransformResult, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(requestedGenerator.Selector)
|
||||
if err != nil {
|
||||
@@ -132,27 +131,15 @@ func mergeGeneratorTemplate(g Generator, requestedGenerator *argoprojiov1alpha1.
|
||||
return *dest, err
|
||||
}
|
||||
|
||||
// Currently for Matrix Generator. Allows interpolating the matrix's 2nd child generator with values from the 1st child generator
|
||||
// InterpolateGenerator allows interpolating the matrix's 2nd child generator with values from the 1st child generator
|
||||
// "params" parameter is an array, where each index corresponds to a generator. Each index contains a map w/ that generator's parameters.
|
||||
func InterpolateGenerator(requestedGenerator *argoprojiov1alpha1.ApplicationSetGenerator, params map[string]interface{}, useGoTemplate bool) (argoprojiov1alpha1.ApplicationSetGenerator, error) {
|
||||
interpolatedGenerator := requestedGenerator.DeepCopy()
|
||||
tmplBytes, err := json.Marshal(interpolatedGenerator)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedGenerator", interpolatedGenerator).Error("error marshalling requested generator for interpolation")
|
||||
return *interpolatedGenerator, err
|
||||
}
|
||||
|
||||
render := utils.Render{}
|
||||
replacedTmplStr, err := render.Replace(string(tmplBytes), params, useGoTemplate)
|
||||
interpolatedGenerator, err := render.RenderGeneratorParams(requestedGenerator, params, useGoTemplate)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("interpolatedGeneratorString", replacedTmplStr).Error("error interpolating generator with other generator's parameter")
|
||||
log.WithError(err).WithField("interpolatedGenerator", interpolatedGenerator).Error("error interpolating generator with other generator's parameter")
|
||||
return *interpolatedGenerator, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(replacedTmplStr), interpolatedGenerator)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedGenerator", interpolatedGenerator).Error("error unmarshalling requested generator for interpolation")
|
||||
return *interpolatedGenerator, err
|
||||
}
|
||||
return *interpolatedGenerator, nil
|
||||
}
|
||||
|
||||
@@ -6,9 +6,11 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
|
||||
argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
@@ -159,8 +161,8 @@ func getMockClusterGenerator() Generator {
|
||||
}
|
||||
|
||||
func getMockGitGenerator() Generator {
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return([]string{"app1", "app2", "app_3", "p1/app4"}, nil)
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
return gitGenerator
|
||||
}
|
||||
@@ -248,6 +250,60 @@ func TestInterpolateGenerator(t *testing.T) {
|
||||
Path: "{{server}}",
|
||||
}
|
||||
|
||||
requestedGenerator = &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
Files: append([]argoprojiov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath),
|
||||
Template: argoprojiov1alpha1.ApplicationSetTemplate{},
|
||||
},
|
||||
}
|
||||
clusterGeneratorParams := map[string]interface{}{
|
||||
"name": "production_01/west", "server": "https://production-01.example.com",
|
||||
}
|
||||
interpolatedGenerator, err = InterpolateGenerator(requestedGenerator, clusterGeneratorParams, false)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator")
|
||||
return
|
||||
}
|
||||
assert.Equal(t, "production_01/west", interpolatedGenerator.Git.Files[0].Path)
|
||||
assert.Equal(t, "https://production-01.example.com", interpolatedGenerator.Git.Files[1].Path)
|
||||
}
|
||||
|
||||
func TestInterpolateGenerator_go(t *testing.T) {
|
||||
requestedGenerator := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Clusters: &argoprojiov1alpha1.ClusterGenerator{
|
||||
Selector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"argocd.argoproj.io/secret-type": "cluster",
|
||||
"path-basename": "{{base .path.path}}",
|
||||
"path-zero": "{{index .path.segments 0}}",
|
||||
"path-full": "{{.path.path}}",
|
||||
"kubernetes.io/environment": `{{default "foo" .my_label}}`,
|
||||
}},
|
||||
},
|
||||
}
|
||||
gitGeneratorParams := map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "p1/p2/app3",
|
||||
"segments": []string{"p1", "p2", "app3"},
|
||||
},
|
||||
}
|
||||
interpolatedGenerator, err := InterpolateGenerator(requestedGenerator, gitGeneratorParams, true)
|
||||
require.NoError(t, err)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("requestedGenerator", requestedGenerator).Error("error interpolating Generator")
|
||||
return
|
||||
}
|
||||
assert.Equal(t, "app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-basename"])
|
||||
assert.Equal(t, "p1", interpolatedGenerator.Clusters.Selector.MatchLabels["path-zero"])
|
||||
assert.Equal(t, "p1/p2/app3", interpolatedGenerator.Clusters.Selector.MatchLabels["path-full"])
|
||||
|
||||
fileNamePath := argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
Path: "{{.name}}",
|
||||
}
|
||||
fileServerPath := argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
Path: "{{.server}}",
|
||||
}
|
||||
|
||||
requestedGenerator = &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
Files: append([]argoprojiov1alpha1.GitFileGeneratorItem{}, fileNamePath, fileServerPath),
|
||||
|
||||
@@ -58,9 +58,9 @@ func (g *GitGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.Applic
|
||||
|
||||
var err error
|
||||
var res []map[string]interface{}
|
||||
if appSetGenerator.Git.Directories != nil {
|
||||
if len(appSetGenerator.Git.Directories) != 0 {
|
||||
res, err = g.generateParamsForGitDirectories(appSetGenerator, appSet.Spec.GoTemplate)
|
||||
} else if appSetGenerator.Git.Files != nil {
|
||||
} else if len(appSetGenerator.Git.Files) != 0 {
|
||||
res, err = g.generateParamsForGitFiles(appSetGenerator, appSet.Spec.GoTemplate)
|
||||
} else {
|
||||
return nil, EmptyAppSetGeneratorError
|
||||
@@ -85,6 +85,7 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
|
||||
"total": len(allPaths),
|
||||
"repoURL": appSetGenerator.Git.RepoURL,
|
||||
"revision": appSetGenerator.Git.Revision,
|
||||
"pathParamPrefix": appSetGenerator.Git.PathParamPrefix,
|
||||
}).Info("applications result from the repo service")
|
||||
|
||||
requestedApps := g.filterApps(appSetGenerator.Git.Directories, allPaths)
|
||||
@@ -121,7 +122,7 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
|
||||
for _, path := range allPaths {
|
||||
|
||||
// A JSON / YAML file path can contain multiple sets of parameters (ie it is an array)
|
||||
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path], useGoTemplate)
|
||||
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path], useGoTemplate, appSetGenerator.Git.PathParamPrefix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process file '%s': %v", path, err)
|
||||
}
|
||||
@@ -133,7 +134,7 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, useGoTemplate bool) ([]map[string]interface{}, error) {
|
||||
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, useGoTemplate bool, pathParamPrefix string) ([]map[string]interface{}, error) {
|
||||
objectsFound := []map[string]interface{}{}
|
||||
|
||||
// First, we attempt to parse as an array
|
||||
@@ -167,7 +168,11 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
paramPath["basenameNormalized"] = utils.SanitizeName(path.Base(paramPath["path"].(string)))
|
||||
paramPath["filenameNormalized"] = utils.SanitizeName(path.Base(paramPath["filename"].(string)))
|
||||
paramPath["segments"] = strings.Split(paramPath["path"].(string), "/")
|
||||
params["path"] = paramPath
|
||||
if pathParamPrefix != "" {
|
||||
params[pathParamPrefix] = map[string]interface{}{"path": paramPath}
|
||||
} else {
|
||||
params["path"] = paramPath
|
||||
}
|
||||
} else {
|
||||
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
|
||||
if err != nil {
|
||||
@@ -176,14 +181,18 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
for k, v := range flat {
|
||||
params[k] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
params["path"] = path.Dir(filePath)
|
||||
params["path.basename"] = path.Base(params["path"].(string))
|
||||
params["path.filename"] = path.Base(filePath)
|
||||
params["path.basenameNormalized"] = utils.SanitizeName(path.Base(params["path"].(string)))
|
||||
params["path.filenameNormalized"] = utils.SanitizeName(path.Base(params["path.filename"].(string)))
|
||||
for k, v := range strings.Split(params["path"].(string), "/") {
|
||||
pathParamName := "path"
|
||||
if pathParamPrefix != "" {
|
||||
pathParamName = pathParamPrefix+"."+pathParamName
|
||||
}
|
||||
params[pathParamName] = path.Dir(filePath)
|
||||
params[pathParamName+".basename"] = path.Base(params[pathParamName].(string))
|
||||
params[pathParamName+".filename"] = path.Base(filePath)
|
||||
params[pathParamName+".basenameNormalized"] = utils.SanitizeName(path.Base(params[pathParamName].(string)))
|
||||
params[pathParamName+".filenameNormalized"] = utils.SanitizeName(path.Base(params[pathParamName+".filename"].(string)))
|
||||
for k, v := range strings.Split(params[pathParamName].(string), "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
params[pathParamName+"["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -192,7 +201,6 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
}
|
||||
|
||||
func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryGeneratorItem, allPaths []string) []string {
|
||||
@@ -223,9 +231,7 @@ func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryG
|
||||
return res
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromApps(requestedApps []string, _ *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool) []map[string]interface{} {
|
||||
// TODO: At some point, the applicationSetGenerator param should be used
|
||||
|
||||
func (g *GitGenerator) generateParamsFromApps(requestedApps []string, appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, len(requestedApps))
|
||||
for i, a := range requestedApps {
|
||||
|
||||
@@ -237,14 +243,22 @@ func (g *GitGenerator) generateParamsFromApps(requestedApps []string, _ *argopro
|
||||
paramPath["basename"] = path.Base(a)
|
||||
paramPath["basenameNormalized"] = utils.SanitizeName(path.Base(a))
|
||||
paramPath["segments"] = strings.Split(paramPath["path"].(string), "/")
|
||||
params["path"] = paramPath
|
||||
if appSetGenerator.Git.PathParamPrefix != "" {
|
||||
params[appSetGenerator.Git.PathParamPrefix] = map[string]interface{}{"path": paramPath}
|
||||
} else {
|
||||
params["path"] = paramPath
|
||||
}
|
||||
} else {
|
||||
params["path"] = a
|
||||
params["path.basename"] = path.Base(a)
|
||||
params["path.basenameNormalized"] = utils.SanitizeName(path.Base(a))
|
||||
for k, v := range strings.Split(params["path"].(string), "/") {
|
||||
pathParamName := "path"
|
||||
if appSetGenerator.Git.PathParamPrefix != "" {
|
||||
pathParamName = appSetGenerator.Git.PathParamPrefix+"."+pathParamName
|
||||
}
|
||||
params[pathParamName] = a
|
||||
params[pathParamName+".basename"] = path.Base(a)
|
||||
params[pathParamName+".basenameNormalized"] = utils.SanitizeName(path.Base(a))
|
||||
for k, v := range strings.Split(params[pathParamName].(string), "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
params[pathParamName+"["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
@@ -9,6 +8,7 @@ import (
|
||||
"github.com/stretchr/testify/mock"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -20,38 +20,11 @@ import (
|
||||
// return io.NewCloser(func() error { return nil }), c.RepoServerServiceClient, nil
|
||||
// }
|
||||
|
||||
type argoCDServiceMock struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, pattern)
|
||||
|
||||
return args.Get(0).(map[string][]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision, path)
|
||||
|
||||
return args.Get(0).([]byte), args.Error(1)
|
||||
}
|
||||
|
||||
func (a argoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
|
||||
args := a.mock.Called(ctx, repoURL, revision)
|
||||
return args.Get(0).([]string), args.Error(1)
|
||||
}
|
||||
|
||||
func Test_generateParamsFromGitFile(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`), false)
|
||||
`), false, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -69,11 +42,33 @@ foo:
|
||||
}, params)
|
||||
}
|
||||
|
||||
func Test_generatePrefixedParamsFromGitFile(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`), false, "myRepo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, []map[string]interface{}{
|
||||
{
|
||||
"foo.bar": "baz",
|
||||
"myRepo.path": "path/dir",
|
||||
"myRepo.path.basename": "dir",
|
||||
"myRepo.path.filename": "file_name.yaml",
|
||||
"myRepo.path.basenameNormalized": "dir",
|
||||
"myRepo.path.filenameNormalized": "file-name.yaml",
|
||||
"myRepo.path[0]": "path",
|
||||
"myRepo.path[1]": "dir",
|
||||
},
|
||||
}, params)
|
||||
}
|
||||
|
||||
func Test_generateParamsFromGitFileGoTemplate(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`), true)
|
||||
`), true, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -97,15 +92,46 @@ foo:
|
||||
}, params)
|
||||
}
|
||||
|
||||
func Test_generatePrefixedParamsFromGitFileGoTemplate(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`), true, "myRepo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, []map[string]interface{}{
|
||||
{
|
||||
"foo": map[string]interface{}{
|
||||
"bar": "baz",
|
||||
},
|
||||
"myRepo": map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "path/dir",
|
||||
"basename": "dir",
|
||||
"filename": "file_name.yaml",
|
||||
"basenameNormalized": "dir",
|
||||
"filenameNormalized": "file-name.yaml",
|
||||
"segments": []string{
|
||||
"path",
|
||||
"dir",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, params)
|
||||
}
|
||||
|
||||
func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
pathParamPrefix string
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow - created apps",
|
||||
@@ -124,6 +150,24 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It prefixes path parameters with PathParamPrefix",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
pathParamPrefix: "myRepo",
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"app_3",
|
||||
"p1/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]interface{}{
|
||||
{"myRepo.path": "app1", "myRepo.path.basename": "app1", "myRepo.path.basenameNormalized": "app1", "myRepo.path[0]": "app1"},
|
||||
{"myRepo.path": "app2", "myRepo.path.basename": "app2", "myRepo.path.basenameNormalized": "app2", "myRepo.path[0]": "app2"},
|
||||
{"myRepo.path": "app_3", "myRepo.path.basename": "app_3", "myRepo.path.basenameNormalized": "app-3", "myRepo.path[0]": "app_3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
|
||||
@@ -200,9 +244,9 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
@@ -212,9 +256,10 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
PathParamPrefix: testCaseCopy.pathParamPrefix,
|
||||
},
|
||||
}},
|
||||
},
|
||||
@@ -229,7 +274,7 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
argoCDServiceMock.Mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -237,12 +282,13 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
pathParamPrefix string
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow - created apps",
|
||||
@@ -288,6 +334,57 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It prefixes path parameters with PathParamPrefix",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
pathParamPrefix: "myRepo",
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"app_3",
|
||||
"p1/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"myRepo": map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "app1",
|
||||
"basename": "app1",
|
||||
"basenameNormalized": "app1",
|
||||
"segments": []string{
|
||||
"app1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"myRepo": map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "app2",
|
||||
"basename": "app2",
|
||||
"basenameNormalized": "app2",
|
||||
"segments": []string{
|
||||
"app2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"myRepo": map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "app_3",
|
||||
"basename": "app_3",
|
||||
"basenameNormalized": "app-3",
|
||||
"segments": []string{
|
||||
"app_3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
|
||||
@@ -442,9 +539,9 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
|
||||
argoCDServiceMock.mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
argoCDServiceMock.Mock.On("GetDirectories", mock.Anything, mock.Anything, mock.Anything).Return(testCaseCopy.repoApps, testCaseCopy.repoError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
applicationSetInfo := argoprojiov1alpha1.ApplicationSet{
|
||||
@@ -455,9 +552,10 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
GoTemplate: true,
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
PathParamPrefix: testCaseCopy.pathParamPrefix,
|
||||
},
|
||||
}},
|
||||
},
|
||||
@@ -472,7 +570,7 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
assert.Equal(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
argoCDServiceMock.Mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -732,8 +830,8 @@ cluster:
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
argoCDServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
@@ -762,7 +860,7 @@ cluster:
|
||||
assert.ElementsMatch(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
argoCDServiceMock.Mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1081,8 +1179,8 @@ cluster:
|
||||
t.Run(testCaseCopy.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
argoCDServiceMock := argoCDServiceMock{mock: &mock.Mock{}}
|
||||
argoCDServiceMock.mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
argoCDServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
argoCDServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(testCaseCopy.repoFileContents, testCaseCopy.repoPathsError)
|
||||
|
||||
var gitGenerator = NewGitGenerator(argoCDServiceMock)
|
||||
@@ -1112,7 +1210,7 @@ cluster:
|
||||
assert.ElementsMatch(t, testCaseCopy.expected, got)
|
||||
}
|
||||
|
||||
argoCDServiceMock.mock.AssertExpectations(t)
|
||||
argoCDServiceMock.Mock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,28 +80,13 @@ func (m *MatrixGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.App
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet, params map[string]interface{}) ([]map[string]interface{}, error) {
|
||||
var matrix *argoprojiov1alpha1.MatrixGenerator
|
||||
if appSetBaseGenerator.Matrix != nil {
|
||||
// Since nested matrix generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here.
|
||||
nestedMatrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(appSetBaseGenerator.Matrix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to unmarshall nested matrix generator: %v", err)
|
||||
}
|
||||
if nestedMatrix != nil {
|
||||
matrix = nestedMatrix.ToMatrixGenerator()
|
||||
}
|
||||
matrixGen, err := getMatrixGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var mergeGenerator *argoprojiov1alpha1.MergeGenerator
|
||||
if appSetBaseGenerator.Merge != nil {
|
||||
// Since nested merge generator is represented as a JSON object in the CRD, we unmarshall it back to a Go struct here.
|
||||
nestedMerge, err := argoprojiov1alpha1.ToNestedMergeGenerator(appSetBaseGenerator.Merge)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to unmarshall nested merge generator: %v", err)
|
||||
}
|
||||
if nestedMerge != nil {
|
||||
mergeGenerator = nestedMerge.ToMergeGenerator()
|
||||
}
|
||||
mergeGen, err := getMergeGenerator(appSetBaseGenerator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, err := Transform(
|
||||
@@ -112,8 +97,8 @@ func (m *MatrixGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Appli
|
||||
SCMProvider: appSetBaseGenerator.SCMProvider,
|
||||
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
|
||||
PullRequest: appSetBaseGenerator.PullRequest,
|
||||
Matrix: matrix,
|
||||
Merge: mergeGenerator,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
Selector: appSetBaseGenerator.Selector,
|
||||
},
|
||||
m.supportedGenerators,
|
||||
@@ -143,10 +128,15 @@ func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Ap
|
||||
var found bool
|
||||
|
||||
for _, r := range appSetGenerator.Matrix.Generators {
|
||||
matrixGen, _ := getMatrixGenerator(r)
|
||||
mergeGen, _ := getMergeGenerator(r)
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
Matrix: matrixGen,
|
||||
Merge: mergeGen,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
@@ -167,6 +157,17 @@ func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Ap
|
||||
|
||||
}
|
||||
|
||||
func getMatrixGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MatrixGenerator, error) {
|
||||
if r.Matrix == nil {
|
||||
return nil, nil
|
||||
}
|
||||
matrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(r.Matrix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return matrix.ToMatrixGenerator(), nil
|
||||
}
|
||||
|
||||
func (m *MatrixGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Matrix.Template
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -16,6 +17,7 @@ import (
|
||||
"github.com/stretchr/testify/mock"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
|
||||
testutils "github.com/argoproj/argo-cd/v2/applicationset/utils/test"
|
||||
argoprojiov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -399,6 +401,8 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}},
|
||||
}
|
||||
|
||||
pullRequestGenerator := &argoprojiov1alpha1.PullRequestGenerator{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
@@ -431,6 +435,31 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
gitGetRequeueAfter: time.Duration(1),
|
||||
expected: time.Duration(1),
|
||||
},
|
||||
{
|
||||
name: "returns the minimal time for pull request",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
PullRequest: pullRequestGenerator,
|
||||
},
|
||||
},
|
||||
gitGetRequeueAfter: time.Duration(15 * time.Second),
|
||||
expected: time.Duration(15 * time.Second),
|
||||
},
|
||||
{
|
||||
name: "returns the default time if no requeueAfterSeconds is provided",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
PullRequest: pullRequestGenerator,
|
||||
},
|
||||
},
|
||||
expected: time.Duration(30 * time.Minute),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -441,16 +470,18 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
PullRequest: g.PullRequest,
|
||||
}
|
||||
mock.On("GetRequeueAfter", &gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter, nil)
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"PullRequest": &PullRequestGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -828,3 +859,72 @@ func (g *generatorMock) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Appl
|
||||
return args.Get(0).(time.Duration)
|
||||
|
||||
}
|
||||
|
||||
func TestGitGenerator_GenerateParams_list_x_git_matrix_generator(t *testing.T) {
|
||||
// Given a matrix generator over a list generator and a git files generator, the nested git files generator should
|
||||
// be treated as a files generator, and it should produce parameters.
|
||||
|
||||
// This tests for a specific bug where a nested git files generator was being treated as a directory generator. This
|
||||
// happened because, when the matrix generator was being processed, the nested git files generator was being
|
||||
// interpolated by the deeplyReplace function. That function cannot differentiate between a nil slice and an empty
|
||||
// slice. So it was replacing the `Directories` field with an empty slice, which the ApplicationSet controller
|
||||
// interpreted as meaning this was a directory generator, not a files generator.
|
||||
|
||||
// Now instead of checking for nil, we check whether the field is a non-empty slice. This test prevents a regression
|
||||
// of that bug.
|
||||
|
||||
listGeneratorMock := &generatorMock{}
|
||||
listGeneratorMock.On("GenerateParams", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator"), mock.AnythingOfType("*v1alpha1.ApplicationSet")).Return([]map[string]interface{}{
|
||||
{"some": "value"},
|
||||
}, nil)
|
||||
listGeneratorMock.On("GetTemplate", mock.AnythingOfType("*v1alpha1.ApplicationSetGenerator")).Return(&argoprojiov1alpha1.ApplicationSetTemplate{})
|
||||
|
||||
gitGeneratorSpec := &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "https://git.example.com",
|
||||
Files: []argoprojiov1alpha1.GitFileGeneratorItem{
|
||||
{Path: "some/path.json"},
|
||||
},
|
||||
}
|
||||
|
||||
repoServiceMock := testutils.ArgoCDServiceMock{Mock: &mock.Mock{}}
|
||||
repoServiceMock.Mock.On("GetFiles", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(map[string][]byte{
|
||||
"some/path.json": []byte("test: content"),
|
||||
}, nil)
|
||||
gitGenerator := NewGitGenerator(repoServiceMock)
|
||||
|
||||
matrixGenerator := NewMatrixGenerator(map[string]Generator{
|
||||
"List": listGeneratorMock,
|
||||
"Git": gitGenerator,
|
||||
})
|
||||
|
||||
matrixGeneratorSpec := &argoprojiov1alpha1.MatrixGenerator{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
List: &argoprojiov1alpha1.ListGenerator{
|
||||
Elements: []apiextensionsv1.JSON{
|
||||
{
|
||||
Raw: []byte(`{"some": "value"}`),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Git: gitGeneratorSpec,
|
||||
},
|
||||
},
|
||||
}
|
||||
params, err := matrixGenerator.GenerateParams(&argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Matrix: matrixGeneratorSpec,
|
||||
}, &argoprojiov1alpha1.ApplicationSet{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []map[string]interface{}{{
|
||||
"path": "some",
|
||||
"path.basename": "some",
|
||||
"path.basenameNormalized": "some",
|
||||
"path.filename": "path.json",
|
||||
"path.filenameNormalized": "path.json",
|
||||
"path[0]": "some",
|
||||
"some": "value",
|
||||
"test": "content",
|
||||
}}, params)
|
||||
}
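// Side note (not part of the diff above): a minimal standalone sketch of the nil-vs-empty-slice
// distinction the test comments describe. The point is that a deep-copy helper should only replace
// a slice field when Len() > 0, so a never-set (nil) Directories field stays nil and a Git files
// generator is not misread as a directory generator. Field names here are illustrative only.
package main

import (
	"fmt"
	"reflect"
)

func main() {
	var nilSlice []string    // e.g. a Directories field that was never set
	emptySlice := []string{} // e.g. a Directories field explicitly set to an empty list

	fmt.Println(reflect.ValueOf(nilSlice).IsNil())     // true
	fmt.Println(reflect.ValueOf(emptySlice).IsNil())   // false: nil and empty are distinct values
	fmt.Println(reflect.ValueOf(nilSlice).Len() > 0)   // false: nothing to copy, leave the field nil
	fmt.Println(reflect.ValueOf(emptySlice).Len() > 0) // false: an empty slice is not copied either
}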
|
||||
|
||||
@@ -137,27 +137,13 @@ func getParamSetsByMergeKey(mergeKeys []string, paramSets []map[string]interface
|
||||
|
||||
// getParams get the parameters generated by this generator.
|
||||
func (m *MergeGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.ApplicationSetNestedGenerator, appSet *argoprojiov1alpha1.ApplicationSet) ([]map[string]interface{}, error) {
|
||||
|
||||
-	var matrix *argoprojiov1alpha1.MatrixGenerator
-	if appSetBaseGenerator.Matrix != nil {
-		nestedMatrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(appSetBaseGenerator.Matrix)
-		if err != nil {
-			return nil, err
-		}
-		if nestedMatrix != nil {
-			matrix = nestedMatrix.ToMatrixGenerator()
-		}
-	}
+	matrixGen, err := getMatrixGenerator(appSetBaseGenerator)
+	if err != nil {
+		return nil, err
+	}
|
||||
|
||||
-	var mergeGenerator *argoprojiov1alpha1.MergeGenerator
-	if appSetBaseGenerator.Merge != nil {
-		nestedMerge, err := argoprojiov1alpha1.ToNestedMergeGenerator(appSetBaseGenerator.Merge)
-		if err != nil {
-			return nil, err
-		}
-		if nestedMerge != nil {
-			mergeGenerator = nestedMerge.ToMergeGenerator()
-		}
-	}
+	mergeGen, err := getMergeGenerator(appSetBaseGenerator)
+	if err != nil {
+		return nil, err
+	}
|
||||
|
||||
t, err := Transform(
|
||||
@@ -168,8 +154,8 @@ func (m *MergeGenerator) getParams(appSetBaseGenerator argoprojiov1alpha1.Applic
|
||||
SCMProvider: appSetBaseGenerator.SCMProvider,
|
||||
ClusterDecisionResource: appSetBaseGenerator.ClusterDecisionResource,
|
||||
PullRequest: appSetBaseGenerator.PullRequest,
|
||||
-			Matrix:                  matrix,
-			Merge:                   mergeGenerator,
+			Matrix:                  matrixGen,
+			Merge:                   mergeGen,
|
||||
Selector: appSetBaseGenerator.Selector,
|
||||
},
|
||||
m.supportedGenerators,
|
||||
@@ -197,10 +183,15 @@ func (m *MergeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.App
|
||||
var found bool
|
||||
|
||||
for _, r := range appSetGenerator.Merge.Generators {
|
||||
+		matrixGen, _ := getMatrixGenerator(r)
+		mergeGen, _ := getMergeGenerator(r)
 		base := &argoprojiov1alpha1.ApplicationSetGenerator{
-			List:     r.List,
-			Clusters: r.Clusters,
-			Git:      r.Git,
+			List:        r.List,
+			Clusters:    r.Clusters,
+			Git:         r.Git,
+			PullRequest: r.PullRequest,
+			Matrix:      matrixGen,
+			Merge:       mergeGen,
 		}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
@@ -221,6 +212,17 @@ func (m *MergeGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.App
|
||||
|
||||
}
|
||||
|
||||
func getMergeGenerator(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MergeGenerator, error) {
|
||||
if r.Merge == nil {
|
||||
return nil, nil
|
||||
}
|
||||
merge, err := argoprojiov1alpha1.ToNestedMergeGenerator(r.Merge)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return merge.ToMergeGenerator(), nil
|
||||
}
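// Note (assumption): the matching getMatrixGenerator helper referenced in getParams and
// GetRequeueAfter above is not shown in this hunk. Based on the conversion calls that the removed
// inline code used, it presumably mirrors getMergeGenerator; a sketch only, the real implementation
// may differ in detail:
func getMatrixGeneratorSketch(r argoprojiov1alpha1.ApplicationSetNestedGenerator) (*argoprojiov1alpha1.MatrixGenerator, error) {
	if r.Matrix == nil {
		return nil, nil
	}
	nestedMatrix, err := argoprojiov1alpha1.ToNestedMatrixGenerator(r.Matrix)
	if err != nil {
		return nil, err
	}
	if nestedMatrix == nil {
		return nil, nil
	}
	return nestedMatrix.ToMatrixGenerator(), nil
}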
|
||||
|
||||
// GetTemplate gets the Template field for the MergeGenerator.
|
||||
func (m *MergeGenerator) GetTemplate(appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator) *argoprojiov1alpha1.ApplicationSetTemplate {
|
||||
return &appSetGenerator.Merge.Template
|
||||
|
||||
@@ -90,13 +90,19 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
shortSHALength = len(pull.HeadSHA)
|
||||
}
|
||||
|
||||
-		params = append(params, map[string]interface{}{
+		paramMap := map[string]interface{}{
 			"number":         strconv.Itoa(pull.Number),
 			"branch":         pull.Branch,
 			"branch_slug":    slug.Make(pull.Branch),
 			"head_sha":       pull.HeadSHA,
 			"head_short_sha": pull.HeadSHA[:shortSHALength],
-		})
+		}
+
+		// PR labels will only be supported for Go Template appsets, since fasttemplate will be deprecated.
+		if applicationSetInfo != nil && applicationSetInfo.Spec.GoTemplate {
+			paramMap["labels"] = pull.Labels
+		}
+		params = append(params, paramMap)
 	}
 	return params, nil
 }
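// Aside (illustrative, not from this diff): the labels parameter is a []string, which Go templates
// can iterate over but fasttemplate cannot, since fasttemplate only performs flat string
// substitution. That is why the labels param is only exposed when the ApplicationSet uses
// GoTemplate. A minimal standalone example of consuming a list-valued parameter with text/template:
package main

import (
	"os"
	"text/template"
)

func main() {
	params := map[string]interface{}{"labels": []string{"preview", "needs-review"}}
	tmpl := template.Must(template.New("labels").Parse("{{ range .labels }}{{ . }} {{ end }}\n"))
	_ = tmpl.Execute(os.Stdout, params) // prints: preview needs-review
}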
|
||||
|
||||
@@ -17,9 +17,10 @@ import (
|
||||
func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cases := []struct {
|
||||
-		selectFunc  func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
-		expected    []map[string]interface{}
-		expectedErr error
+		selectFunc     func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
+		expected       []map[string]interface{}
+		expectedErr    error
+		applicationSet argoprojiov1alpha1.ApplicationSet
|
||||
}{
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
@@ -107,6 +108,71 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
expected: nil,
|
||||
expectedErr: fmt.Errorf("error listing repos: fake error"),
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Labels: []string{"preview"},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "1",
|
||||
"branch": "branch1",
|
||||
"branch_slug": "branch1",
|
||||
"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
"head_short_sha": "089d92cb",
|
||||
"labels": []string{"preview"},
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
applicationSet: argoprojiov1alpha1.ApplicationSet{
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
// Application set is using Go Template.
|
||||
GoTemplate: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Labels: []string{"preview"},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "1",
|
||||
"branch": "branch1",
|
||||
"branch_slug": "branch1",
|
||||
"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
"head_short_sha": "089d92cb",
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
applicationSet: argoprojiov1alpha1.ApplicationSet{
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
// Application set is using fasttemplate.
|
||||
GoTemplate: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
@@ -117,7 +183,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
PullRequest: &argoprojiov1alpha1.PullRequestGenerator{},
|
||||
}
|
||||
|
||||
-		got, gotErr := gen.GenerateParams(&generatorConfig, nil)
+		got, gotErr := gen.GenerateParams(&generatorConfig, &c.applicationSet)
|
||||
assert.Equal(t, c.expectedErr, gotErr)
|
||||
assert.ElementsMatch(t, c.expected, got)
|
||||
}
|
||||
|
||||
@@ -122,6 +122,15 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing Azure Devops service: %v", err)
|
||||
}
|
||||
} else if providerConfig.Bitbucket != nil {
|
||||
appPassword, err := g.getSecretRef(ctx, providerConfig.Bitbucket.AppPasswordRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Bitbucket cloud appPassword: %v", err)
|
||||
}
|
||||
provider, err = scm_provider.NewBitBucketCloudProvider(ctx, providerConfig.Bitbucket.Owner, providerConfig.Bitbucket.User, appPassword, providerConfig.Bitbucket.AllBranches)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing Bitbucket cloud service: %v", err)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("no SCM provider implementation configured")
|
||||
}
|
||||
|
||||
@@ -20,10 +20,12 @@ func Client(g github_app_auth.Authentication, url string) (*github.Client, error
|
||||
url = g.EnterpriseBaseURL
|
||||
}
|
||||
var client *github.Client
|
||||
httpClient := http.Client{Transport: rt}
|
||||
if url == "" {
|
||||
httpClient := http.Client{Transport: rt}
|
||||
client = github.NewClient(&httpClient)
|
||||
} else {
|
||||
rt.BaseURL = url
|
||||
httpClient := http.Client{Transport: rt}
|
||||
client, err = github.NewEnterpriseClient(url, url, &httpClient)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create github enterprise client: %w", err)
|
||||
|
||||
@@ -69,6 +69,7 @@ func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) {
|
||||
Number: pull.ID,
|
||||
Branch: pull.FromRef.DisplayID, // ID: refs/heads/main DisplayID: main
|
||||
HeadSHA: pull.FromRef.LatestCommit, // This is not defined in the official docs, but works in practice
|
||||
Labels: []string{}, // Not supported by library
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -122,16 +122,19 @@ func TestListPullRequestPagination(t *testing.T) {
|
||||
Number: 101,
|
||||
Branch: "feature-101",
|
||||
HeadSHA: "ab3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[0])
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 102,
|
||||
Branch: "feature-102",
|
||||
HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[1])
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 200,
|
||||
Branch: "feature-200",
|
||||
HeadSHA: "cb3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[2])
|
||||
}
|
||||
|
||||
@@ -284,11 +287,13 @@ func TestListPullRequestBranchMatch(t *testing.T) {
|
||||
Number: 101,
|
||||
Branch: "feature-101",
|
||||
HeadSHA: "ab3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[0])
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 102,
|
||||
Branch: "feature-102",
|
||||
HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[1])
|
||||
|
||||
regexp = `.*2$`
|
||||
@@ -305,6 +310,7 @@ func TestListPullRequestBranchMatch(t *testing.T) {
|
||||
Number: 102,
|
||||
Branch: "feature-102",
|
||||
HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[0])
|
||||
|
||||
regexp = `[\d{2}`
|
||||
|
||||
@@ -57,7 +57,17 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
Number: int(pr.Index),
|
||||
Branch: pr.Head.Ref,
|
||||
HeadSHA: pr.Head.Sha,
|
||||
Labels: getGiteaPRLabelNames(pr.Labels),
|
||||
})
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// Get the Gitea pull request label names.
|
||||
func getGiteaPRLabelNames(giteaLabels []*gitea.Label) []string {
|
||||
var labelNames []string
|
||||
for _, giteaLabel := range giteaLabels {
|
||||
labelNames = append(labelNames, giteaLabel.Name)
|
||||
}
|
||||
return labelNames
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/sdk/gitea"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -257,3 +258,32 @@ func TestGiteaList(t *testing.T) {
|
||||
assert.Equal(t, prs[0].Branch, "test")
|
||||
assert.Equal(t, prs[0].HeadSHA, "7bbaf62d92ddfafd9cc8b340c619abaec32bc09f")
|
||||
}
|
||||
|
||||
func TestGetGiteaPRLabelNames(t *testing.T) {
|
||||
Tests := []struct {
|
||||
Name string
|
||||
PullLabels []*gitea.Label
|
||||
ExpectedResult []string
|
||||
}{
|
||||
{
|
||||
Name: "PR has labels",
|
||||
PullLabels: []*gitea.Label{
|
||||
&gitea.Label{Name: "label1"},
|
||||
&gitea.Label{Name: "label2"},
|
||||
&gitea.Label{Name: "label3"},
|
||||
},
|
||||
ExpectedResult: []string{"label1", "label2", "label3"},
|
||||
},
|
||||
{
|
||||
Name: "PR does not have labels",
|
||||
PullLabels: []*gitea.Label{},
|
||||
ExpectedResult: nil,
|
||||
},
|
||||
}
|
||||
for _, test := range Tests {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
labels := getGiteaPRLabelNames(test.PullLabels)
|
||||
assert.Equal(t, test.ExpectedResult, labels)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,6 +68,7 @@ func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
Number: *pull.Number,
|
||||
Branch: *pull.Head.Ref,
|
||||
HeadSHA: *pull.Head.SHA,
|
||||
Labels: getGithubPRLabelNames(pull.Labels),
|
||||
})
|
||||
}
|
||||
if resp.NextPage == 0 {
|
||||
@@ -97,3 +98,12 @@ func containLabels(expectedLabels []string, gotLabels []*github.Label) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Get the Github pull request label names.
|
||||
func getGithubPRLabelNames(gitHubLabels []*github.Label) []string {
|
||||
var labelNames []string
|
||||
for _, gitHubLabel := range gitHubLabels {
|
||||
labelNames = append(labelNames, *gitHubLabel.Name)
|
||||
}
|
||||
return labelNames
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-github/v35/github"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func toPtr(s string) *string {
|
||||
@@ -57,3 +58,32 @@ func TestContainLabels(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetGitHubPRLabelNames(t *testing.T) {
|
||||
Tests := []struct {
|
||||
Name string
|
||||
PullLabels []*github.Label
|
||||
ExpectedResult []string
|
||||
}{
|
||||
{
|
||||
Name: "PR has labels",
|
||||
PullLabels: []*github.Label{
|
||||
&github.Label{Name: toPtr("label1")},
|
||||
&github.Label{Name: toPtr("label2")},
|
||||
&github.Label{Name: toPtr("label3")},
|
||||
},
|
||||
ExpectedResult: []string{"label1", "label2", "label3"},
|
||||
},
|
||||
{
|
||||
Name: "PR does not have labels",
|
||||
PullLabels: []*github.Label{},
|
||||
ExpectedResult: nil,
|
||||
},
|
||||
}
|
||||
for _, test := range Tests {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
labels := getGithubPRLabelNames(test.PullLabels)
|
||||
assert.Equal(t, test.ExpectedResult, labels)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,6 +72,7 @@ func (g *GitLabService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
Number: mr.IID,
|
||||
Branch: mr.SourceBranch,
|
||||
HeadSHA: mr.SHA,
|
||||
Labels: mr.Labels,
|
||||
})
|
||||
}
|
||||
if resp.NextPage == 0 {
|
||||
|
||||
@@ -12,6 +12,8 @@ type PullRequest struct {
|
||||
Branch string
|
||||
// HeadSHA is the SHA of the HEAD from which the pull request originated.
|
||||
HeadSHA string
|
||||
// Labels of the pull request.
|
||||
Labels []string
|
||||
}
|
||||
|
||||
type PullRequestService interface {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"net/http"
|
||||
pathpkg "path"
|
||||
|
||||
gitlab "github.com/xanzy/go-gitlab"
|
||||
@@ -144,7 +145,11 @@ func (g *GitlabProvider) listBranches(_ context.Context, repo *Repository) ([]gi
|
||||
branches := []gitlab.Branch{}
|
||||
// If we don't specifically want to query for all branches, just use the default branch and call it a day.
|
||||
if !g.allBranches {
|
||||
-		gitlabBranch, _, err := g.client.Branches.GetBranch(repo.RepositoryId, repo.Branch, nil)
+		gitlabBranch, resp, err := g.client.Branches.GetBranch(repo.RepositoryId, repo.Branch, nil)
|
||||
// 404s are not an error here, just a normal false.
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
return []gitlab.Branch{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -157,6 +162,10 @@ func (g *GitlabProvider) listBranches(_ context.Context, repo *Repository) ([]gi
|
||||
}
|
||||
for {
|
||||
gitlabBranches, resp, err := g.client.Branches.ListBranches(repo.RepositoryId, opt)
|
||||
// 404s are not an error here, just a normal false.
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
return []gitlab.Branch{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -274,6 +274,8 @@ func gitlabMockHandler(t *testing.T) func(http.ResponseWriter, *http.Request) {
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
case "/api/v4/projects/27084533/repository/branches/foo":
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
default:
|
||||
_, err := io.WriteString(w, `[]`)
|
||||
if err != nil {
|
||||
@@ -391,3 +393,29 @@ func TestGitlabHasPath(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGitlabGetBranches(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gitlabMockHandler(t)(w, r)
|
||||
}))
|
||||
host, _ := NewGitlabProvider(context.Background(), "test-argocd-proton", "", ts.URL, false, true)
|
||||
|
||||
repo := &Repository{
|
||||
RepositoryId: 27084533,
|
||||
Branch: "master",
|
||||
}
|
||||
t.Run("branch exists", func(t *testing.T) {
|
||||
repos, err := host.GetBranches(context.Background(), repo)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, repos[0].Branch, "master")
|
||||
})
|
||||
|
||||
repo2 := &Repository{
|
||||
RepositoryId: 27084533,
|
||||
Branch: "foo",
|
||||
}
|
||||
t.Run("unknown branch", func(t *testing.T) {
|
||||
_, err := host.GetBranches(context.Background(), repo2)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ var Policies = map[string]Policy{
|
||||
"sync": &SyncPolicy{},
|
||||
"create-only": &CreateOnlyPolicy{},
|
||||
"create-update": &CreateUpdatePolicy{},
|
||||
"create-delete": &CreateDeletePolicy{},
|
||||
}
|
||||
|
||||
type SyncPolicy struct{}
|
||||
@@ -42,3 +43,13 @@ func (p *CreateOnlyPolicy) Update() bool {
|
||||
func (p *CreateOnlyPolicy) Delete() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type CreateDeletePolicy struct{}
|
||||
|
||||
func (p *CreateDeletePolicy) Update() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *CreateDeletePolicy) Delete() bool {
|
||||
return true
|
||||
}
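// Note (assumption): the Policy interface itself is not shown in this hunk. From the methods
// implemented above it is presumably equivalent to the sketch below, with the controller gating
// updates and deletions of generated Applications on it; the consumer here is hypothetical.
type policySketch interface {
	Update() bool
	Delete() bool
}

func allowedActions(p policySketch, needsUpdate, needsDelete bool) (doUpdate, doDelete bool) {
	// create-delete would allow deletions but never updates; create-update the reverse.
	return needsUpdate && p.Update(), needsDelete && p.Delete()
}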
|
||||
|
||||
34  applicationset/utils/test/testutils.go  (new file)
@@ -0,0 +1,34 @@
package test

import (
	"context"

	"github.com/stretchr/testify/mock"
)

type ArgoCDServiceMock struct {
	Mock *mock.Mock
}

func (a ArgoCDServiceMock) GetApps(ctx context.Context, repoURL string, revision string) ([]string, error) {
	args := a.Mock.Called(ctx, repoURL, revision)

	return args.Get(0).([]string), args.Error(1)
}

func (a ArgoCDServiceMock) GetFiles(ctx context.Context, repoURL string, revision string, pattern string) (map[string][]byte, error) {
	args := a.Mock.Called(ctx, repoURL, revision, pattern)

	return args.Get(0).(map[string][]byte), args.Error(1)
}

func (a ArgoCDServiceMock) GetFileContent(ctx context.Context, repoURL string, revision string, path string) ([]byte, error) {
	args := a.Mock.Called(ctx, repoURL, revision, path)

	return args.Get(0).([]byte), args.Error(1)
}

func (a ArgoCDServiceMock) GetDirectories(ctx context.Context, repoURL string, revision string) ([]string, error) {
	args := a.Mock.Called(ctx, repoURL, revision)
	return args.Get(0).([]string), args.Error(1)
}
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"text/template"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Masterminds/sprig"
|
||||
"github.com/Masterminds/sprig/v3"
|
||||
"github.com/valyala/fasttemplate"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -133,6 +133,16 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
if err := r.deeplyReplace(copyValue, originalValue, replaceMap, useGoTemplate); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Keys can be templated as well as values (e.g. to template something into an annotation).
|
||||
if key.Kind() == reflect.String {
|
||||
templatedKey, err := r.Replace(key.String(), replaceMap, useGoTemplate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key = reflect.ValueOf(templatedKey)
|
||||
}
|
||||
|
||||
copy.SetMapIndex(key, copyValue)
|
||||
}
|
||||
|
||||
@@ -164,7 +174,7 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
|
||||
func (r *Render) RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *argoappsv1.ApplicationSetSyncPolicy, params map[string]interface{}, useGoTemplate bool) (*argoappsv1.Application, error) {
|
||||
if tmpl == nil {
|
||||
return nil, fmt.Errorf("application template is empty ")
|
||||
return nil, fmt.Errorf("application template is empty")
|
||||
}
|
||||
|
||||
if len(params) == 0 {
|
||||
@@ -194,6 +204,27 @@ func (r *Render) RenderTemplateParams(tmpl *argoappsv1.Application, syncPolicy *
|
||||
return replacedTmpl, nil
|
||||
}
|
||||
|
||||
func (r *Render) RenderGeneratorParams(gen *argoappsv1.ApplicationSetGenerator, params map[string]interface{}, useGoTemplate bool) (*argoappsv1.ApplicationSetGenerator, error) {
|
||||
if gen == nil {
|
||||
return nil, fmt.Errorf("generator is empty")
|
||||
}
|
||||
|
||||
if len(params) == 0 {
|
||||
return gen, nil
|
||||
}
|
||||
|
||||
original := reflect.ValueOf(gen)
|
||||
copy := reflect.New(original.Type()).Elem()
|
||||
|
||||
if err := r.deeplyReplace(copy, original, params, useGoTemplate); err != nil {
|
||||
return nil, fmt.Errorf("failed to replace parameters in generator: %w", err)
|
||||
}
|
||||
|
||||
replacedGen := copy.Interface().(*argoappsv1.ApplicationSetGenerator)
|
||||
|
||||
return replacedGen, nil
|
||||
}
|
||||
|
||||
var isTemplatedRegex = regexp.MustCompile(".*{{.*}}.*")
|
||||
|
||||
// Replace executes basic string substitution of a template with replacement values.
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
logtest "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -40,7 +41,7 @@ func TestRenderTemplateParams(t *testing.T) {
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: argoappsv1.ApplicationSpec{
|
||||
-			Source: argoappsv1.ApplicationSource{
+			Source: &argoappsv1.ApplicationSource{
|
||||
Path: "",
|
||||
RepoURL: "",
|
||||
TargetRevision: "",
|
||||
@@ -219,7 +220,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: argoappsv1.ApplicationSpec{
|
||||
-			Source: argoappsv1.ApplicationSource{
+			Source: &argoappsv1.ApplicationSource{
|
||||
Path: "",
|
||||
RepoURL: "",
|
||||
TargetRevision: "",
|
||||
@@ -461,14 +462,56 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRenderTemplateKeys(t *testing.T) {
|
||||
t.Run("fasttemplate", func(t *testing.T) {
|
||||
application := &argoappsv1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
"annotation-{{key}}": "annotation-{{value}}",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
params := map[string]interface{}{
|
||||
"key": "some-key",
|
||||
"value": "some-value",
|
||||
}
|
||||
|
||||
render := Render{}
|
||||
newApplication, err := render.RenderTemplateParams(application, nil, params, false)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, newApplication.ObjectMeta.Annotations, "annotation-some-key")
|
||||
assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-some-key"], "annotation-some-value")
|
||||
})
|
||||
t.Run("gotemplate", func(t *testing.T) {
|
||||
application := &argoappsv1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
"annotation-{{ .key }}": "annotation-{{ .value }}",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
params := map[string]interface{}{
|
||||
"key": "some-key",
|
||||
"value": "some-value",
|
||||
}
|
||||
|
||||
render := Render{}
|
||||
newApplication, err := render.RenderTemplateParams(application, nil, params, true)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, newApplication.ObjectMeta.Annotations, "annotation-some-key")
|
||||
assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-some-key"], "annotation-some-value")
|
||||
})
|
||||
}
|
||||
|
||||
func TestRenderTemplateParamsFinalizers(t *testing.T) {
|
||||
|
||||
emptyApplication := &argoappsv1.Application{
|
||||
Spec: argoappsv1.ApplicationSpec{
|
||||
Source: argoappsv1.ApplicationSource{
|
||||
Source: &argoappsv1.ApplicationSource{
|
||||
Path: "",
|
||||
RepoURL: "",
|
||||
TargetRevision: "",
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEesHEB7vX5Y2RxXypjMy1nI1z7iRG
|
||||
JI9/gt/sYqzpsa65aaNP4npM43DDxoIy/MQBo9s/mxGxmA+8UXeDpVC9vw==
|
||||
-----END PUBLIC KEY-----
|
||||
@@ -271,6 +271,16 @@
|
||||
"description": "the application's namespace.",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -585,6 +595,16 @@
|
||||
"description": "the application's namespace.",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -735,6 +755,42 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/links": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "ListLinks returns the list of all application deep links",
|
||||
"operationId": "ApplicationService_ListLinks",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "namespace",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationLinksResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/logs": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -1385,6 +1441,67 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/resource/links": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "ListResourceLinks returns the list of all resource deep links",
|
||||
"operationId": "ApplicationService_ListResourceLinks",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "namespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "resourceName",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "version",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "group",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "kind",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationLinksResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/revisions/{revision}/metadata": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -2560,6 +2677,37 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/projects/{name}/links": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ProjectService"
|
||||
],
|
||||
"summary": "ListLinks returns all deep links for the particular project",
|
||||
"operationId": "ProjectService_ListLinks",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationLinksResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/projects/{name}/syncwindows": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -3296,6 +3444,18 @@
|
||||
"description": "Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity.",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Google Cloud Platform service account key.",
|
||||
"name": "gcpServiceAccountKey",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"description": "Whether to force HTTP basic auth.",
|
||||
"name": "forceHttpBasicAuth",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -3454,6 +3614,29 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/settings/plugins": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"SettingsService"
|
||||
],
|
||||
"summary": "Get returns Argo CD plugins",
|
||||
"operationId": "SettingsService_GetPlugins",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/clusterSettingsPluginsResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/stream/applications": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -3507,6 +3690,16 @@
|
||||
"description": "the application's namespace.",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"collectionFormat": "multi",
|
||||
"description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -3900,6 +4093,34 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationLinkInfo": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"iconClass": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationLinksResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/applicationLinkInfo"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationLogEntry": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -4097,6 +4318,9 @@
|
||||
"appLabelKey": {
|
||||
"type": "string"
|
||||
},
|
||||
"appsInAnyNamespaceEnabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"configManagementPlugins": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -4177,6 +4401,17 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterSettingsPluginsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"plugins": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/clusterPlugin"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"gpgkeyGnuPGPublicKeyCreateResponse": {
|
||||
"type": "object",
|
||||
"title": "Response to a public key creation request",
|
||||
@@ -4197,6 +4432,24 @@
|
||||
"type": "object",
|
||||
"title": "Generic (empty) response for GPG public key CRUD requests"
|
||||
},
|
||||
"intstrIntOrString": {
|
||||
"description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true",
|
||||
"type": "object",
|
||||
"title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString",
|
||||
"properties": {
|
||||
"intVal": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"strVal": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
},
|
||||
"notificationService": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -4505,6 +4758,65 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"repositoryParameterAnnouncement": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"array": {
|
||||
"description": "array is the default value of the parameter if the parameter is an array.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"collectionType": {
|
||||
"description": "collectionType is the type of value this parameter holds - either a single value (a string) or a collection\n(array or map). If collectionType is set, only the field with that type will be used. If collectionType is not\nset, `string` is the default. If collectionType is set to an invalid value, a validation error is thrown.",
|
||||
"type": "string"
|
||||
},
|
||||
"itemType": {
|
||||
"description": "itemType determines the primitive data type represented by the parameter. Parameters are always encoded as\nstrings, but this field lets them be interpreted as other primitive types.",
|
||||
"type": "string"
|
||||
},
|
||||
"map": {
|
||||
"description": "map is the default value of the parameter if the parameter is a map.",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "name is the name identifying a parameter.",
|
||||
"type": "string"
|
||||
},
|
||||
"required": {
|
||||
"description": "required defines if this given parameter is mandatory.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"string": {
|
||||
"description": "string is the default value of the parameter if the parameter is a string.",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"description": "title is a human-readable text of the parameter name.",
|
||||
"type": "string"
|
||||
},
|
||||
"tooltip": {
|
||||
"description": "tooltip is a human-readable description of the parameter.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"repositoryPluginAppSpec": {
|
||||
"type": "object",
|
||||
"title": "PluginAppSpec contains details about a plugin-type Application",
|
||||
"properties": {
|
||||
"parametersAnnouncement": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repositoryParameterAnnouncement"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"repositoryRefs": {
|
||||
"type": "object",
|
||||
"title": "A subset of the repository's named refs",
|
||||
@@ -4551,6 +4863,9 @@
|
||||
"kustomize": {
|
||||
"$ref": "#/definitions/repositoryKustomizeAppSpec"
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/repositoryPluginAppSpec"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -5373,6 +5688,23 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationMatchExpression": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"key": {
|
||||
"type": "string"
|
||||
},
|
||||
"operator": {
|
||||
"type": "string"
|
||||
},
|
||||
"values": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSet": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSet is a set of Application resources\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status",
|
||||
@@ -5388,6 +5720,31 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetApplicationStatus": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSetApplicationStatus contains details about each Application managed by the ApplicationSet",
|
||||
"properties": {
|
||||
"application": {
|
||||
"type": "string",
|
||||
"title": "Application contains the name of the Application resource"
|
||||
},
|
||||
"lastTransitionTime": {
|
||||
"$ref": "#/definitions/v1Time"
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"title": "Message contains human-readable message indicating details about the status"
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)"
|
||||
},
|
||||
"step": {
|
||||
"type": "string",
|
||||
"title": "Step tracks which step this Application should be updated in"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetCondition": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSetCondition contains details about an applicationset condition, which is usally an error or warning",
|
||||
@@ -5494,6 +5851,31 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetRolloutStep": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"matchExpressions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationMatchExpression"
|
||||
}
|
||||
},
|
||||
"maxUpdate": {
|
||||
"$ref": "#/definitions/intstrIntOrString"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetRolloutStrategy": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"steps": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetRolloutStep"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetSpec": {
|
||||
"description": "ApplicationSetSpec represents a class of application set state.",
|
||||
"type": "object",
|
||||
@@ -5507,6 +5889,9 @@
|
||||
"goTemplate": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"strategy": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetStrategy"
|
||||
},
|
||||
"syncPolicy": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetSyncPolicy"
|
||||
},
|
||||
@@ -5519,6 +5904,12 @@
|
||||
"type": "object",
|
||||
"title": "ApplicationSetStatus defines the observed state of ApplicationSet",
|
||||
"properties": {
|
||||
"applicationStatus": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetApplicationStatus"
|
||||
}
|
||||
},
|
||||
"conditions": {
|
||||
"type": "array",
|
||||
"title": "INSERT ADDITIONAL STATUS FIELD - define observed state of cluster\nImportant: Run \"make\" to regenerate code after modifying this file",
|
||||
@@ -5528,6 +5919,18 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetStrategy": {
|
||||
"description": "ApplicationSetStrategy configures how generated Applications are updated in sequence.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"rollingSync": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetRolloutStrategy"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetSyncPolicy": {
|
||||
"description": "ApplicationSetSyncPolicy configures how generated Applications will relate to their\nApplicationSet.",
|
||||
"type": "object",
|
||||
@@ -5604,6 +6007,10 @@
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourcePlugin"
|
||||
},
|
||||
"ref": {
|
||||
"description": "Ref is reference to another source within sources field. This field will not be used if used with a `source` tag.",
|
||||
"type": "string"
|
||||
},
|
||||
"repoURL": {
|
||||
"type": "string",
|
||||
"title": "RepoURL is the URL to the repository (Git or Helm) that contains the application manifests"
|
||||
@@ -5772,6 +6179,39 @@
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"parameters": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourcePluginParameter"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSourcePluginParameter": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"array": {
|
||||
"description": "Array is the value of an array type parameter.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"map": {
|
||||
"description": "Map is the value of a map type parameter.",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "Name is the name identifying a parameter.",
|
||||
"type": "string"
|
||||
},
|
||||
"string": {
|
||||
"description": "String_ is the value of a string type parameter.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -5808,6 +6248,13 @@
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Sources is a reference to the location of the application's manifests or chart",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
},
|
||||
"syncPolicy": {
|
||||
"$ref": "#/definitions/v1alpha1SyncPolicy"
|
||||
}
|
||||
@@ -5858,6 +6305,13 @@
|
||||
"type": "string",
|
||||
"title": "SourceType specifies the type of this application"
|
||||
},
|
||||
"sourceTypes": {
|
||||
"type": "array",
|
||||
"title": "SourceTypes specifies the type of the sources included in the application",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"summary": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSummary"
|
||||
},
|
||||
@@ -6155,6 +6609,13 @@
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Sources is a reference to the application's multiple sources used for comparison",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -6289,6 +6750,9 @@
|
||||
"$ref": "#/definitions/v1alpha1GitFileGeneratorItem"
|
||||
}
|
||||
},
|
||||
"pathParamPrefix": {
|
||||
"type": "string"
|
||||
},
|
||||
"repoURL": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -6544,6 +7008,23 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ManagedNamespaceMetadata": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"labels": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1MatrixGenerator": {
|
||||
"description": "MatrixGenerator generates the cartesian product of two sets of parameters. The parameters are defined by two nested\ngenerators.",
|
||||
"type": "object",
|
||||
@@ -6897,6 +7378,14 @@
|
||||
"type": "boolean",
|
||||
"title": "EnableOCI specifies whether helm-oci support should be enabled for this repo"
|
||||
},
|
||||
"forceHttpBasicAuth": {
|
||||
"type": "boolean",
|
||||
"title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections"
|
||||
},
|
||||
"gcpServiceAccountKey": {
|
||||
"type": "string",
|
||||
"title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos"
|
||||
},
|
||||
"githubAppEnterpriseBaseUrl": {
|
||||
"type": "string",
|
||||
"title": "GithubAppEnterpriseBaseURL specifies the GitHub API URL for GitHub app authentication. If empty will default to https://api.github.com"
|
||||
@@ -6919,6 +7408,10 @@
|
||||
"type": "string",
|
||||
"title": "Password for authenticating at the repo server"
|
||||
},
|
||||
"proxy": {
|
||||
"type": "string",
|
||||
"title": "Proxy specifies the HTTP/HTTPS proxy used to access repos at the repo server"
|
||||
},
|
||||
"sshPrivateKey": {
|
||||
"type": "string",
|
||||
"title": "SSHPrivateKey contains the private key data for authenticating at the repo server using SSH (only Git repos)"
|
||||
@@ -6975,6 +7468,14 @@
|
||||
"type": "boolean",
|
||||
"title": "EnableOCI specifies whether helm-oci support should be enabled for this repo"
|
||||
},
|
||||
"forceHttpBasicAuth": {
|
||||
"type": "boolean",
|
||||
"title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections"
|
||||
},
|
||||
"gcpServiceAccountKey": {
|
||||
"type": "string",
|
||||
"title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos"
|
||||
},
|
||||
"githubAppEnterpriseBaseUrl": {
|
||||
"type": "string",
|
||||
"title": "GithubAppEnterpriseBaseURL specifies the base URL of GitHub Enterprise installation. If empty will default to https://api.github.com"
|
||||
@@ -7465,8 +7966,22 @@
|
||||
"type": "string",
|
||||
"title": "Revision holds the revision the sync was performed against"
|
||||
},
|
||||
"revisions": {
|
||||
"type": "array",
|
||||
"title": "Revisions holds the revision of each source in sources field the sync was performed against",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Sources is a reference to the application sources used for the sync operation",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -7767,9 +8282,23 @@
|
||||
"description": "Revision is the revision (Git) or chart version (Helm) which to sync the application to\nIf omitted, will use the revision specified in app spec.",
|
||||
"type": "string"
|
||||
},
|
||||
"revisions": {
|
||||
"description": "Revisions is the list of revision (Git) or chart version (Helm) which to sync each source in sources field for the application to\nIf omitted, will use the revision specified in app spec.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Sources overrides the source definition set in the application.\nThis is typically set in a Rollback operation and is nil during a Sync operation",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
},
|
||||
"syncOptions": {
|
||||
"type": "array",
|
||||
"title": "SyncOptions provide per-sync sync-options, e.g. Validate=false",
|
||||
@@ -7815,8 +8344,22 @@
|
||||
"type": "string",
|
||||
"title": "Revision holds the revision this sync operation was performed to"
|
||||
},
|
||||
"revisions": {
|
||||
"type": "array",
|
||||
"title": "Revisions holds the revision this sync operation was performed for respective indexed source in sources field",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Source records the application source information of the sync, used for comparing auto-sync",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -7827,6 +8370,9 @@
|
||||
"automated": {
|
||||
"$ref": "#/definitions/v1alpha1SyncPolicyAutomated"
|
||||
},
|
||||
"managedNamespaceMetadata": {
|
||||
"$ref": "#/definitions/v1alpha1ManagedNamespaceMetadata"
|
||||
},
|
||||
"retry": {
|
||||
"$ref": "#/definitions/v1alpha1RetryStrategy"
|
||||
},
|
||||
@@ -7868,6 +8414,13 @@
|
||||
"type": "string",
|
||||
"title": "Revision contains information about the revision the comparison has been performed to"
|
||||
},
|
||||
"revisions": {
|
||||
"type": "array",
|
||||
"title": "Revisions contains information about the revisions of multiple sources the comparison has been performed to",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"title": "Status is the sync state of the comparison"
|
||||
|
||||
@@ -46,16 +46,17 @@ func getSubmoduleEnabled() bool {
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
-		clientConfig         clientcmd.ClientConfig
-		metricsAddr          string
-		probeBindAddr        string
-		webhookAddr          string
-		enableLeaderElection bool
-		namespace            string
-		argocdRepoServer     string
-		policy               string
-		debugLog             bool
-		dryRun               bool
+		clientConfig           clientcmd.ClientConfig
+		metricsAddr            string
+		probeBindAddr          string
+		webhookAddr            string
+		enableLeaderElection   bool
+		namespace              string
+		argocdRepoServer       string
+		policy                 string
+		debugLog               bool
+		dryRun                 bool
+		enableProgressiveSyncs bool
|
||||
)
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
@@ -89,7 +90,7 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
policyObj, exists := utils.Policies[policy]
|
||||
if !exists {
|
||||
log.Info("Policy value can be: sync, create-only, create-update")
|
||||
log.Info("Policy value can be: sync, create-only, create-update, create-delete")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@@ -168,15 +169,16 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
go func() { errors.CheckError(askPassServer.Run(askpass.SocketPath)) }()
|
||||
if err = (&controllers.ApplicationSetReconciler{
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
|
||||
Renderer: &utils.Render{},
|
||||
Policy: policyObj,
|
||||
ArgoAppClientset: appSetConfig,
|
||||
KubeClientset: k8sClient,
|
||||
ArgoDB: argoCDDB,
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
|
||||
Renderer: &utils.Render{},
|
||||
Policy: policyObj,
|
||||
ArgoAppClientset: appSetConfig,
|
||||
KubeClientset: k8sClient,
|
||||
ArgoDB: argoCDDB,
|
||||
EnableProgressiveSyncs: enableProgressiveSyncs,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
|
||||
os.Exit(1)
|
||||
@@ -200,11 +202,12 @@ func NewCommand() *cobra.Command {
|
||||
"Enabling this will ensure there is only one active controller manager.")
|
||||
command.Flags().StringVar(&namespace, "namespace", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACE", ""), "Argo CD repo namespace (default: argocd)")
|
||||
command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Argo CD repo server address")
|
||||
command.Flags().StringVar(&policy, "policy", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_POLICY", "sync"), "Modify how application is synced between the generator and the cluster. Default is 'sync' (create & update & delete), options: 'create-only', 'create-update' (no deletion)")
|
||||
command.Flags().StringVar(&policy, "policy", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_POLICY", "sync"), "Modify how application is synced between the generator and the cluster. Default is 'sync' (create & update & delete), options: 'create-only', 'create-update' (no deletion), 'create-delete' (no update)")
|
||||
command.Flags().BoolVar(&debugLog, "debug", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DEBUG", false), "Print debug logs. Takes precedence over loglevel")
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode")
|
||||
command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.")
|
||||
return &command
|
||||
}
|
||||
|
||||
|
||||
@@ -71,6 +71,7 @@ func NewCommand() *cobra.Command {
|
||||
dexServerStrictTLS bool
|
||||
staticAssetsDir string
|
||||
applicationNamespaces []string
|
||||
enableProxyExtension bool
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -184,6 +185,7 @@ func NewCommand() *cobra.Command {
|
||||
RedisClient: redisClient,
|
||||
StaticAssetsDir: staticAssetsDir,
|
||||
ApplicationNamespaces: applicationNamespaces,
|
||||
EnableProxyExtension: enableProxyExtension,
|
||||
}
|
||||
|
||||
stats.RegisterStackDumper()
|
||||
@@ -235,6 +237,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&dexServerPlaintext, "dex-server-plaintext", env.ParseBoolFromEnv("ARGOCD_SERVER_DEX_SERVER_PLAINTEXT", false), "Use a plaintext client (non-TLS) to connect to dex server")
|
||||
command.Flags().BoolVar(&dexServerStrictTLS, "dex-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_SERVER_DEX_SERVER_STRICT_TLS", false), "Perform strict validation of TLS certificates when connecting to dex server")
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
|
||||
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
|
||||
cacheSrc = servercache.AddCacheFlagsToCmd(command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
|
||||
@@ -56,6 +56,7 @@ func NewAdminCommand() *cobra.Command {
command.AddCommand(NewExportCommand())
command.AddCommand(NewDashboardCommand())
command.AddCommand(NewNotificationsCommand())
command.AddCommand(NewInitialPasswordCommand())

command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")

@@ -401,7 +401,12 @@ func reconcileApplications(
return nil, err
}

res := appStateManager.CompareAppState(&app, proj, app.Spec.Source.TargetRevision, app.Spec.Source, false, false, nil)
sources := make([]v1alpha1.ApplicationSource, 0)
revisions := make([]string, 0)
sources = append(sources, app.Spec.GetSource())
revisions = append(revisions, app.Spec.GetSource().TargetRevision)

res := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
items = append(items, appReconcileResult{
Name: app.Name,
Conditions: app.Status.Conditions,

@@ -80,6 +80,7 @@ func TestGetReconcileResults_Refresh(t *testing.T) {
Namespace: "default",
},
Spec: v1alpha1.ApplicationSpec{
Source: &v1alpha1.ApplicationSource{},
Project: "default",
Destination: v1alpha1.ApplicationDestination{
Server: v1alpha1.KubernetesInternalAPIServerAddr,

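The CompareAppState change above is part of the multi-source Application work that runs through the rest of this diff: callers now pass slices of sources and revisions, Spec.Source becomes a pointer, and call sites go through accessors such as app.Spec.GetSource(). A rough sketch of what such an accessor is assumed to look like; this is illustrative, not the exact upstream implementation:

// Hypothetical accessor illustrating the pattern these call sites rely on.
func (spec *ApplicationSpec) GetSource() ApplicationSource {
	// Prefer the first entry of a multi-source spec when one is defined.
	if len(spec.Sources) > 0 {
		return spec.Sources[0]
	}
	// Fall back to the single, now pointer-typed, Source field.
	if spec.Source != nil {
		return *spec.Source
	}
	return ApplicationSource{}
}
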
cmd/argocd/commands/admin/initial_password.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package admin

import (
	"context"
	"fmt"

	"github.com/spf13/cobra"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"github.com/argoproj/argo-cd/v2/util/cli"
	"github.com/argoproj/argo-cd/v2/util/errors"
)

const initialPasswordSecretName = "argocd-initial-admin-secret"

// NewInitialPasswordCommand defines a new command to retrieve Argo CD initial password.
func NewInitialPasswordCommand() *cobra.Command {
	var (
		clientConfig clientcmd.ClientConfig
	)
	var command = cobra.Command{
		Use:   "initial-password",
		Short: "Prints initial password to log in to Argo CD for the first time",
		Run: func(c *cobra.Command, args []string) {

			config, err := clientConfig.ClientConfig()
			errors.CheckError(err)
			namespace, _, err := clientConfig.Namespace()
			errors.CheckError(err)

			kubeClientset := kubernetes.NewForConfigOrDie(config)
			secret, err := kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), initialPasswordSecretName, v1.GetOptions{})
			errors.CheckError(err)

			if initialPass, ok := secret.Data["password"]; ok {
				fmt.Println(string(initialPass))
				fmt.Println("\n This password must be only used for first time login. We strongly recommend you update the password using `argocd account update-password`.")
			}
		},
	}
	clientConfig = cli.AddKubectlFlagsToCmd(&command)

	return &command
}
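For context, the secret read by this new `argocd admin initial-password` command is the `argocd-initial-admin-secret` used for first-time logins. A standalone sketch of the equivalent lookup with plain client-go follows; the kubeconfig path and the "argocd" namespace are assumptions for illustration. Note that client-go returns Secret.Data values already base64-decoded, which is why the command above can print the bytes directly:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes the default kubeconfig location and an install in the "argocd" namespace.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	secret, err := cs.CoreV1().Secrets("argocd").Get(context.Background(), "argocd-initial-admin-secret", v1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(secret.Data["password"])) // Data values are already decoded bytes
}
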
@@ -33,7 +33,7 @@ func NewNotificationsCommand() *cobra.Command {
var argocdService service.Service
toolsCommand := cmd.NewToolsCommand(
"notifications",
"notifications",
"argocd admin notifications",
applications,
settings.GetFactorySettings(argocdService, "argocd-notifications-secret", "argocd-notifications-cm"), func(clientConfig clientcmd.ClientConfig) {
k8sCfg, err := clientConfig.ClientConfig()

@@ -206,7 +206,7 @@ var validatorsByGroup = map[string]settingValidator{
}
ssoProvider = "Dex"
} else if general.OIDCConfigRAW != "" {
if _, err := settings.UnmarshalOIDCConfig(general.OIDCConfigRAW); err != nil {
if err := settings.ValidateOIDCConfig(general.OIDCConfigRAW); err != nil {
return "", fmt.Errorf("invalid oidc.config: %v", err)
}
ssoProvider = "OIDC"

@@ -33,7 +33,6 @@ import (

"github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
cmdutil "github.com/argoproj/argo-cd/v2/cmd/util"
argocommon "github.com/argoproj/argo-cd/v2/common"
"github.com/argoproj/argo-cd/v2/controller"
argocdclient "github.com/argoproj/argo-cd/v2/pkg/apiclient"
"github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
@@ -150,9 +149,6 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
c.HelpFunc()(c, args)
os.Exit(1)
}
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
}
if appNamespace != "" {
app.Namespace = appNamespace
}
@@ -169,7 +165,9 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.

// Get app before creating to see if it is being updated or no change
existing, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &app.Name})
if grpc.UnwrapGRPCStatus(err).Code() != codes.NotFound {
unwrappedError := grpc.UnwrapGRPCStatus(err).Code()
// As part of the fix for CVE-2022-41354, the API will return Permission Denied when an app does not exist.
if unwrappedError != codes.NotFound && unwrappedError != codes.PermissionDenied {
errors.CheckError(err)
}

@@ -294,10 +292,6 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
pConn, projIf := headless.NewClientOrDie(clientOpts, c).NewProjectClientOrDie()
|
||||
defer argoio.Close(pConn)
|
||||
proj, err := projIf.Get(ctx, &projectpkg.ProjectQuery{Name: app.Spec.Project})
|
||||
@@ -440,15 +434,16 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
|
||||
func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *argoappv1.SyncWindows) {
|
||||
source := app.Spec.GetSource()
|
||||
fmt.Printf(printOpFmtStr, "Name:", app.QualifiedName())
|
||||
fmt.Printf(printOpFmtStr, "Project:", app.Spec.GetProject())
|
||||
fmt.Printf(printOpFmtStr, "Server:", getServer(app))
|
||||
fmt.Printf(printOpFmtStr, "Namespace:", app.Spec.Destination.Namespace)
|
||||
fmt.Printf(printOpFmtStr, "URL:", appURL)
|
||||
fmt.Printf(printOpFmtStr, "Repo:", app.Spec.Source.RepoURL)
|
||||
fmt.Printf(printOpFmtStr, "Target:", app.Spec.Source.TargetRevision)
|
||||
fmt.Printf(printOpFmtStr, "Path:", app.Spec.Source.Path)
|
||||
printAppSourceDetails(&app.Spec.Source)
|
||||
fmt.Printf(printOpFmtStr, "Repo:", source.RepoURL)
|
||||
fmt.Printf(printOpFmtStr, "Target:", source.TargetRevision)
|
||||
fmt.Printf(printOpFmtStr, "Path:", source.Path)
|
||||
printAppSourceDetails(&source)
|
||||
var wds []string
|
||||
var status string
|
||||
var allow, deny, inactiveAllows bool
|
||||
@@ -502,11 +497,11 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
|
||||
syncStatusStr := string(app.Status.Sync.Status)
|
||||
switch app.Status.Sync.Status {
|
||||
case argoappv1.SyncStatusCodeSynced:
|
||||
syncStatusStr += fmt.Sprintf(" to %s", app.Spec.Source.TargetRevision)
|
||||
syncStatusStr += fmt.Sprintf(" to %s", app.Spec.GetSource().TargetRevision)
|
||||
case argoappv1.SyncStatusCodeOutOfSync:
|
||||
syncStatusStr += fmt.Sprintf(" from %s", app.Spec.Source.TargetRevision)
|
||||
syncStatusStr += fmt.Sprintf(" from %s", app.Spec.GetSource().TargetRevision)
|
||||
}
|
||||
if !git.IsCommitSHA(app.Spec.Source.TargetRevision) && !git.IsTruncatedCommitSHA(app.Spec.Source.TargetRevision) && len(app.Status.Sync.Revision) > 7 {
|
||||
if !git.IsCommitSHA(app.Spec.GetSource().TargetRevision) && !git.IsTruncatedCommitSHA(app.Spec.GetSource().TargetRevision) && len(app.Status.Sync.Revision) > 7 {
|
||||
syncStatusStr += fmt.Sprintf(" (%s)", app.Status.Sync.Revision[0:7])
|
||||
}
|
||||
fmt.Printf(printOpFmtStr, "Sync Status:", syncStatusStr)
|
||||
@@ -575,8 +570,8 @@ func truncateString(str string, num int) string {
|
||||
|
||||
// printParams prints parameters and overrides
|
||||
func printParams(app *argoappv1.Application) {
|
||||
if app.Spec.Source.Helm != nil {
|
||||
printHelmParams(app.Spec.Source.Helm)
|
||||
if app.Spec.GetSource().Helm != nil {
|
||||
printHelmParams(app.Spec.GetSource().Helm)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -624,10 +619,6 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName, AppNamespace: &appNs})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
visited := cmdutil.SetAppSpecOptions(c.Flags(), &app.Spec, &appOpts)
|
||||
if visited == 0 {
|
||||
log.Error("Please set at least one option to update")
|
||||
@@ -692,11 +683,8 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName, AppNamespace: &appNs})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
updated, nothingToUnset := unset(&app.Spec.Source, opts)
|
||||
source := app.Spec.GetSource()
|
||||
updated, nothingToUnset := unset(&source, opts)
|
||||
if nothingToUnset {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -842,18 +830,19 @@ func getLocalObjects(ctx context.Context, app *argoappv1.Application, local, loc
|
||||
|
||||
func getLocalObjectsString(ctx context.Context, app *argoappv1.Application, local, localRepoRoot, appLabelKey, kubeVersion string, apiVersions []string, kustomizeOptions *argoappv1.KustomizeOptions,
|
||||
configManagementPlugins []*argoappv1.ConfigManagementPlugin, trackingMethod string) []string {
|
||||
res, err := repository.GenerateManifests(ctx, local, localRepoRoot, app.Spec.Source.TargetRevision, &repoapiclient.ManifestRequest{
|
||||
Repo: &argoappv1.Repository{Repo: app.Spec.Source.RepoURL},
|
||||
source := app.Spec.GetSource()
|
||||
res, err := repository.GenerateManifests(ctx, local, localRepoRoot, source.TargetRevision, &repoapiclient.ManifestRequest{
|
||||
Repo: &argoappv1.Repository{Repo: source.RepoURL},
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.Name,
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &app.Spec.Source,
|
||||
ApplicationSource: &source,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: kubeVersion,
|
||||
ApiVersions: apiVersions,
|
||||
Plugins: configManagementPlugins,
|
||||
TrackingMethod: trackingMethod,
|
||||
}, true, &git.NoopCredsStore{}, resource.MustParse("0"))
|
||||
}, true, &git.NoopCredsStore{}, resource.MustParse("0"), nil)
|
||||
errors.CheckError(err)
|
||||
|
||||
return res.Manifests
|
||||
@@ -930,10 +919,6 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{ApplicationName: &appName, AppNamespace: &appNs})
|
||||
errors.CheckError(err)
|
||||
conn, settingsIf := clientset.NewSettingsClientOrDie()
|
||||
@@ -964,7 +949,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
|
||||
diffOption.serversideRes = res
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.6.")
|
||||
fmt.Fprintf(os.Stderr, "Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.")
|
||||
conn, clusterIf := clientset.NewClusterClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
|
||||
@@ -1247,7 +1232,7 @@ func printApplicationTable(apps []argoappv1.Application, output *string) {
|
||||
formatConditionsSummary(app),
|
||||
}
|
||||
if *output == "wide" {
|
||||
vals = append(vals, app.Spec.Source.RepoURL, app.Spec.Source.Path, app.Spec.Source.TargetRevision)
|
||||
vals = append(vals, app.Spec.GetSource().RepoURL, app.Spec.GetSource().Path, app.Spec.GetSource().TargetRevision)
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, fmtStr, vals...)
|
||||
}
|
||||
@@ -1298,17 +1283,6 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
if cluster != "" {
|
||||
appList = argo.FilterByCluster(appList, cluster)
|
||||
}
|
||||
var appsWithDeprecatedPlugins []string
|
||||
for _, app := range appList {
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
appsWithDeprecatedPlugins = append(appsWithDeprecatedPlugins, app.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(appsWithDeprecatedPlugins) > 0 {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
log.Warnf("The following Applications use deprecated plugins: %s", strings.Join(appsWithDeprecatedPlugins, ", "))
|
||||
}
|
||||
|
||||
switch output {
|
||||
case "yaml", "json":
|
||||
@@ -1625,17 +1599,19 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
|
||||
var localObjsStrings []string
|
||||
diffOption := &DifferenceOption{}
|
||||
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.HasMultipleSources() {
|
||||
log.Fatal("argocd cli does not work on multi-source app")
|
||||
return
|
||||
}
|
||||
|
||||
if local != "" {
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.Automated != nil && !dryRun {
|
||||
log.Fatal("Cannot use local sync when Automatic Sync Policy is enabled except with --dry-run")
|
||||
}
|
||||
@@ -1709,16 +1685,6 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
}
|
||||
if diffChanges {
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
resources, err := appIf.ManagedResources(ctx, &applicationpkg.ResourcesQuery{
|
||||
ApplicationName: &appName,
|
||||
AppNamespace: &appNs,
|
||||
@@ -2087,8 +2053,9 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
if len(parameters) == 0 {
|
||||
return
|
||||
}
|
||||
source := app.Spec.GetSource()
|
||||
var sourceType argoappv1.ApplicationSourceType
|
||||
if st, _ := app.Spec.Source.ExplicitType(); st != nil {
|
||||
if st, _ := source.ExplicitType(); st != nil {
|
||||
sourceType = *st
|
||||
} else if app.Status.SourceType != "" {
|
||||
sourceType = app.Status.SourceType
|
||||
@@ -2100,8 +2067,8 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
|
||||
switch sourceType {
|
||||
case argoappv1.ApplicationSourceTypeHelm:
|
||||
if app.Spec.Source.Helm == nil {
|
||||
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{}
|
||||
if source.Helm == nil {
|
||||
source.Helm = &argoappv1.ApplicationSourceHelm{}
|
||||
}
|
||||
for _, p := range parameters {
|
||||
newParam, err := argoappv1.NewHelmParameter(p, false)
|
||||
@@ -2109,7 +2076,7 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
app.Spec.Source.Helm.AddParameter(*newParam)
|
||||
source.Helm.AddParameter(*newParam)
|
||||
}
|
||||
default:
|
||||
log.Fatalf("Parameters can only be set against Helm applications")
|
||||
@@ -2161,10 +2128,6 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
if output == "id" {
|
||||
printApplicationHistoryIds(app.Status.History)
|
||||
} else {
|
||||
@@ -2225,10 +2188,6 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
depInfo, err := findRevisionHistory(app, int64(depID))
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -2312,10 +2271,6 @@ func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
settingsConn, settingsIf := clientset.NewSettingsClientOrDie()
|
||||
defer argoio.Close(settingsConn)
|
||||
argoSettings, err := settingsIf.Get(context.Background(), &settingspkg.SettingsQuery{})
|
||||
@@ -2415,10 +2370,6 @@ func NewApplicationEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
appData, err := json.Marshal(app.Spec)
|
||||
errors.CheckError(err)
|
||||
appData, err = yaml.JSONToYAML(appData)
|
||||
|
||||
@@ -498,7 +498,7 @@ func TestPrintAppSummaryTable(t *testing.T) {
|
||||
},
|
||||
Project: "default",
|
||||
Destination: v1alpha1.ApplicationDestination{Server: "local", Namespace: "argocd"},
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "test",
|
||||
TargetRevision: "master",
|
||||
Path: "/test",
|
||||
@@ -604,7 +604,7 @@ func TestPrintParams(t *testing.T) {
|
||||
output, _ := captureOutput(func() error {
|
||||
app := &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
@@ -985,7 +985,7 @@ func TestPrintApplicationTableWide(t *testing.T) {
|
||||
Server: "http://localhost:8080",
|
||||
Namespace: "default",
|
||||
},
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps",
|
||||
Path: "guestbook",
|
||||
TargetRevision: "123",
|
||||
@@ -1261,7 +1261,7 @@ func testApp(name, project string, labels map[string]string, annotations map[str
|
||||
Finalizers: finalizers,
|
||||
},
|
||||
Spec: argoappv1.ApplicationSpec{
|
||||
Source: argoappv1.ApplicationSource{
|
||||
Source: &argoappv1.ApplicationSource{
|
||||
RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
|
||||
},
|
||||
Project: project,
|
||||
|
||||
@@ -95,7 +95,7 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
|
||||
fmt.Println()
|
||||
}
|
||||
if showParams {
|
||||
printHelmParams(appSet.Spec.Template.Spec.Source.Helm)
|
||||
printHelmParams(appSet.Spec.Template.Spec.GetSource().Helm)
|
||||
}
|
||||
default:
|
||||
errors.CheckError(fmt.Errorf("unknown output format: %s", output))
|
||||
@@ -317,7 +317,7 @@ func printApplicationSetTable(apps []arogappsetv1.ApplicationSet, output *string
|
||||
conditions,
|
||||
}
|
||||
if *output == "wide" {
|
||||
vals = append(vals, app.Spec.Template.Spec.Source.RepoURL, app.Spec.Template.Spec.Source.Path, app.Spec.Template.Spec.Source.TargetRevision)
|
||||
vals = append(vals, app.Spec.Template.Spec.GetSource().RepoURL, app.Spec.Template.Spec.GetSource().Path, app.Spec.Template.Spec.GetSource().TargetRevision)
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, fmtStr, vals...)
|
||||
}
|
||||
@@ -333,25 +333,29 @@ func getServerForAppSet(appSet *arogappsetv1.ApplicationSet) string {
|
||||
}
|
||||
|
||||
func printAppSetSummaryTable(appSet *arogappsetv1.ApplicationSet) {
|
||||
source := appSet.Spec.Template.Spec.GetSource()
|
||||
fmt.Printf(printOpFmtStr, "Name:", appSet.Name)
|
||||
fmt.Printf(printOpFmtStr, "Project:", appSet.Spec.Template.Spec.GetProject())
|
||||
fmt.Printf(printOpFmtStr, "Server:", getServerForAppSet(appSet))
|
||||
fmt.Printf(printOpFmtStr, "Namespace:", appSet.Spec.Template.Spec.Destination.Namespace)
|
||||
fmt.Printf(printOpFmtStr, "Repo:", appSet.Spec.Template.Spec.Source.RepoURL)
|
||||
fmt.Printf(printOpFmtStr, "Target:", appSet.Spec.Template.Spec.Source.TargetRevision)
|
||||
fmt.Printf(printOpFmtStr, "Path:", appSet.Spec.Template.Spec.Source.Path)
|
||||
printAppSourceDetails(&appSet.Spec.Template.Spec.Source)
|
||||
fmt.Printf(printOpFmtStr, "Repo:", source.RepoURL)
|
||||
fmt.Printf(printOpFmtStr, "Target:", source.TargetRevision)
|
||||
fmt.Printf(printOpFmtStr, "Path:", source.Path)
|
||||
printAppSourceDetails(&source)
|
||||
|
||||
var syncPolicy string
|
||||
if appSet.Spec.SyncPolicy != nil && appSet.Spec.Template.Spec.SyncPolicy.Automated != nil {
|
||||
syncPolicy = "Automated"
|
||||
if appSet.Spec.Template.Spec.SyncPolicy.Automated.Prune {
|
||||
syncPolicy += " (Prune)"
|
||||
var (
|
||||
syncPolicyStr string
|
||||
syncPolicy = appSet.Spec.Template.Spec.SyncPolicy
|
||||
)
|
||||
if syncPolicy != nil && syncPolicy.Automated != nil {
|
||||
syncPolicyStr = "Automated"
|
||||
if syncPolicy.Automated.Prune {
|
||||
syncPolicyStr += " (Prune)"
|
||||
}
|
||||
} else {
|
||||
syncPolicy = "<none>"
|
||||
syncPolicyStr = "<none>"
|
||||
}
|
||||
fmt.Printf(printOpFmtStr, "SyncPolicy:", syncPolicy)
|
||||
fmt.Printf(printOpFmtStr, "SyncPolicy:", syncPolicyStr)
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
@@ -68,3 +70,124 @@ func TestPrintApplicationSetTable(t *testing.T) {
|
||||
expectation := "NAME NAMESPACE PROJECT SYNCPOLICY CONDITIONS\napp-name default nil [{ResourcesUpToDate <nil> True }]\napp-name default nil [{ResourcesUpToDate <nil> True }]\n"
|
||||
assert.Equal(t, expectation, output)
|
||||
}
|
||||
|
||||
func TestPrintAppSetSummaryTable(t *testing.T) {
|
||||
baseAppSet := &arogappsetv1.ApplicationSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "app-name",
|
||||
},
|
||||
Spec: arogappsetv1.ApplicationSetSpec{
|
||||
Generators: []arogappsetv1.ApplicationSetGenerator{
|
||||
arogappsetv1.ApplicationSetGenerator{
|
||||
Git: &arogappsetv1.GitGenerator{
|
||||
RepoURL: "https://github.com/argoproj/argo-cd.git",
|
||||
Revision: "head",
|
||||
Directories: []arogappsetv1.GitDirectoryGeneratorItem{
|
||||
arogappsetv1.GitDirectoryGeneratorItem{
|
||||
Path: "applicationset/examples/git-generator-directory/cluster-addons/*",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Template: arogappsetv1.ApplicationSetTemplate{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Project: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: arogappsetv1.ApplicationSetStatus{
|
||||
Conditions: []arogappsetv1.ApplicationSetCondition{
|
||||
arogappsetv1.ApplicationSetCondition{
|
||||
Status: v1alpha1.ApplicationSetConditionStatusTrue,
|
||||
Type: arogappsetv1.ApplicationSetConditionResourcesUpToDate,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
appsetSpecSyncPolicy := baseAppSet.DeepCopy()
|
||||
appsetSpecSyncPolicy.Spec.SyncPolicy = &arogappsetv1.ApplicationSetSyncPolicy{
|
||||
PreserveResourcesOnDeletion: true,
|
||||
}
|
||||
|
||||
appSetTemplateSpecSyncPolicy := baseAppSet.DeepCopy()
|
||||
appSetTemplateSpecSyncPolicy.Spec.Template.Spec.SyncPolicy = &arogappsetv1.SyncPolicy{
|
||||
Automated: &arogappsetv1.SyncPolicyAutomated{
|
||||
SelfHeal: true,
|
||||
},
|
||||
}
|
||||
|
||||
appSetBothSyncPolicies := baseAppSet.DeepCopy()
|
||||
appSetBothSyncPolicies.Spec.SyncPolicy = &arogappsetv1.ApplicationSetSyncPolicy{
|
||||
PreserveResourcesOnDeletion: true,
|
||||
}
|
||||
appSetBothSyncPolicies.Spec.Template.Spec.SyncPolicy = &arogappsetv1.SyncPolicy{
|
||||
Automated: &arogappsetv1.SyncPolicyAutomated{
|
||||
SelfHeal: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
appSet *arogappsetv1.ApplicationSet
|
||||
expectedOutput string
|
||||
}{
|
||||
{
|
||||
name: "appset with only spec.syncPolicy set",
|
||||
appSet: appsetSpecSyncPolicy,
|
||||
expectedOutput: `Name: app-name
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: <none>
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "appset with only spec.template.spec.syncPolicy set",
|
||||
appSet: appSetTemplateSpecSyncPolicy,
|
||||
expectedOutput: `Name: app-name
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: Automated
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "appset with both spec.SyncPolicy and spec.template.spec.syncPolicy set",
|
||||
appSet: appSetBothSyncPolicies,
|
||||
expectedOutput: `Name: app-name
|
||||
Project: default
|
||||
Server:
|
||||
Namespace:
|
||||
Repo:
|
||||
Target:
|
||||
Path:
|
||||
SyncPolicy: Automated
|
||||
`,
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
defer func() {
|
||||
os.Stdout = oldStdout
|
||||
}()
|
||||
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
printAppSetSummaryTable(tt.appSet)
|
||||
w.Close()
|
||||
|
||||
out, err := ioutil.ReadAll(r)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedOutput, string(out))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -202,17 +202,18 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
}
|
||||
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr}), time.Hour)
|
||||
srv := server.NewServer(ctx, server.ArgoCDServerOpts{
|
||||
EnableGZip: false,
|
||||
Namespace: namespace,
|
||||
ListenPort: *port,
|
||||
AppClientset: appClientset,
|
||||
DisableAuth: true,
|
||||
RedisClient: redis.NewClient(&redis.Options{Addr: mr.Addr()}),
|
||||
Cache: servercache.NewCache(appstateCache, 0, 0, 0),
|
||||
KubeClientset: kubeClientset,
|
||||
Insecure: true,
|
||||
ListenHost: *address,
|
||||
RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr},
|
||||
EnableGZip: false,
|
||||
Namespace: namespace,
|
||||
ListenPort: *port,
|
||||
AppClientset: appClientset,
|
||||
DisableAuth: true,
|
||||
RedisClient: redis.NewClient(&redis.Options{Addr: mr.Addr()}),
|
||||
Cache: servercache.NewCache(appstateCache, 0, 0, 0),
|
||||
KubeClientset: kubeClientset,
|
||||
Insecure: true,
|
||||
ListenHost: *address,
|
||||
RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr},
|
||||
EnableProxyExtension: false,
|
||||
})
|
||||
srv.Init(ctx)
|
||||
|
||||
|
||||
@@ -70,6 +70,9 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
|
||||
# Add a private Git repository on GitHub Enterprise via GitHub App
|
||||
argocd repo add https://ghe.example.com/repos/repo --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem --github-app-enterprise-base-url https://ghe.example.com/api/v3
|
||||
|
||||
# Add a private Git repository on Google Cloud Sources via GCP service account credentials
|
||||
argocd repo add https://source.developers.google.com/p/my-google-cloud-project/r/my-repo --gcp-service-account-key-path service-account-key.json
|
||||
`
|
||||
|
||||
var command = &cobra.Command{
|
||||
@@ -135,6 +138,17 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
}
|
||||
}
|
||||
|
||||
if repoOpts.GCPServiceAccountKeyPath != "" {
|
||||
if git.IsHTTPSURL(repoOpts.Repo.Repo) {
|
||||
gcpServiceAccountKey, err := os.ReadFile(repoOpts.GCPServiceAccountKeyPath)
|
||||
errors.CheckError(err)
|
||||
repoOpts.Repo.GCPServiceAccountKey = string(gcpServiceAccountKey)
|
||||
} else {
|
||||
err := fmt.Errorf("--gcp-service-account-key-path is only supported for HTTPS repositories")
|
||||
errors.CheckError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set repository connection properties only when creating repository, not
|
||||
// when creating repository credentials.
|
||||
// InsecureIgnoreHostKey is deprecated and only here for backwards compat
|
||||
@@ -146,6 +160,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
repoOpts.Repo.GithubAppInstallationId = repoOpts.GithubAppInstallationId
|
||||
repoOpts.Repo.GitHubAppEnterpriseBaseURL = repoOpts.GitHubAppEnterpriseBaseURL
|
||||
repoOpts.Repo.Proxy = repoOpts.Proxy
|
||||
repoOpts.Repo.ForceHttpBasicAuth = repoOpts.ForceHttpBasicAuth
|
||||
|
||||
if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" {
|
||||
errors.CheckError(fmt.Errorf("Must specify --name for repos of type 'helm'"))
|
||||
@@ -184,6 +199,8 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
GithubAppEnterpriseBaseUrl: repoOpts.Repo.GitHubAppEnterpriseBaseURL,
|
||||
Proxy: repoOpts.Proxy,
|
||||
Project: repoOpts.Repo.Project,
|
||||
GcpServiceAccountKey: repoOpts.Repo.GCPServiceAccountKey,
|
||||
ForceHttpBasicAuth: repoOpts.Repo.ForceHttpBasicAuth,
|
||||
}
|
||||
_, err := repoIf.ValidateAccess(ctx, &repoAccessReq)
|
||||
errors.CheckError(err)
|
||||
|
||||
@@ -39,12 +39,13 @@ func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
// NewRepoCredsAddCommand returns a new instance of an `argocd repocreds add` command
|
||||
func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
repo appsv1.RepoCreds
|
||||
upsert bool
|
||||
sshPrivateKeyPath string
|
||||
tlsClientCertPath string
|
||||
tlsClientCertKeyPath string
|
||||
githubAppPrivateKeyPath string
|
||||
repo appsv1.RepoCreds
|
||||
upsert bool
|
||||
sshPrivateKeyPath string
|
||||
tlsClientCertPath string
|
||||
tlsClientCertKeyPath string
|
||||
githubAppPrivateKeyPath string
|
||||
gcpServiceAccountKeyPath string
|
||||
)
|
||||
|
||||
// For better readability and easier formatting
|
||||
@@ -62,6 +63,9 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
|
||||
# Add credentials with helm oci registry so that these oci registry urls do not need to be added as repos individually.
|
||||
argocd repocreds add localhost:5000/myrepo --enable-oci --type helm
|
||||
|
||||
# Add credentials with GCP credentials for all repositories under https://source.developers.google.com/p/my-google-cloud-project/r/
|
||||
argocd repocreds add https://source.developers.google.com/p/my-google-cloud-project/r/ --gcp-service-account-key-path service-account-key.json
|
||||
`
|
||||
|
||||
var command = &cobra.Command{
|
||||
@@ -127,6 +131,18 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
}
|
||||
}
|
||||
|
||||
// Specifying gcpServiceAccountKeyPath is only valid for HTTPS repositories
|
||||
if gcpServiceAccountKeyPath != "" {
|
||||
if git.IsHTTPSURL(repo.URL) {
|
||||
gcpServiceAccountKey, err := os.ReadFile(gcpServiceAccountKeyPath)
|
||||
errors.CheckError(err)
|
||||
repo.GCPServiceAccountKey = string(gcpServiceAccountKey)
|
||||
} else {
|
||||
err := fmt.Errorf("--gcp-service-account-key-path is only supported for HTTPS repositories")
|
||||
errors.CheckError(err)
|
||||
}
|
||||
}
|
||||
|
||||
conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoCredsClientOrDie()
|
||||
defer io.Close(conn)
|
||||
|
||||
@@ -158,6 +174,8 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
|
||||
command.Flags().BoolVar(&repo.EnableOCI, "enable-oci", false, "Specifies whether helm-oci support should be enabled for this repo")
|
||||
command.Flags().StringVar(&repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"")
|
||||
command.Flags().StringVar(&gcpServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
|
||||
command.Flags().BoolVar(&repo.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force basic auth when connecting via HTTP")
|
||||
return command
|
||||
}
|
||||
|
||||
|
||||
@@ -138,22 +138,26 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
}
|
||||
flags.Visit(func(f *pflag.Flag) {
|
||||
visited++
|
||||
source := spec.GetSourcePtr()
|
||||
if source == nil {
|
||||
source = &argoappv1.ApplicationSource{}
|
||||
}
|
||||
switch f.Name {
|
||||
case "repo":
|
||||
spec.Source.RepoURL = appOpts.repoURL
|
||||
source.RepoURL = appOpts.repoURL
|
||||
case "path":
|
||||
spec.Source.Path = appOpts.appPath
|
||||
source.Path = appOpts.appPath
|
||||
case "helm-chart":
|
||||
spec.Source.Chart = appOpts.chart
|
||||
source.Chart = appOpts.chart
|
||||
case "revision":
|
||||
spec.Source.TargetRevision = appOpts.revision
|
||||
source.TargetRevision = appOpts.revision
|
||||
case "revision-history-limit":
|
||||
i := int64(appOpts.revisionHistoryLimit)
|
||||
spec.RevisionHistoryLimit = &i
|
||||
case "values":
|
||||
setHelmOpt(&spec.Source, helmOpts{valueFiles: appOpts.valuesFiles})
|
||||
setHelmOpt(source, helmOpts{valueFiles: appOpts.valuesFiles})
|
||||
case "ignore-missing-value-files":
|
||||
setHelmOpt(&spec.Source, helmOpts{ignoreMissingValueFiles: appOpts.ignoreMissingValueFiles})
|
||||
setHelmOpt(source, helmOpts{ignoreMissingValueFiles: appOpts.ignoreMissingValueFiles})
|
||||
case "values-literal-file":
|
||||
var data []byte
|
||||
|
||||
@@ -165,41 +169,41 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
data, err = config.ReadRemoteFile(appOpts.values)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
setHelmOpt(&spec.Source, helmOpts{values: string(data)})
|
||||
setHelmOpt(source, helmOpts{values: string(data)})
|
||||
case "release-name":
|
||||
setHelmOpt(&spec.Source, helmOpts{releaseName: appOpts.releaseName})
|
||||
setHelmOpt(source, helmOpts{releaseName: appOpts.releaseName})
|
||||
case "helm-version":
|
||||
setHelmOpt(&spec.Source, helmOpts{version: appOpts.helmVersion})
|
||||
setHelmOpt(source, helmOpts{version: appOpts.helmVersion})
|
||||
case "helm-pass-credentials":
|
||||
setHelmOpt(&spec.Source, helmOpts{passCredentials: appOpts.helmPassCredentials})
|
||||
setHelmOpt(source, helmOpts{passCredentials: appOpts.helmPassCredentials})
|
||||
case "helm-set":
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSets: appOpts.helmSets})
|
||||
setHelmOpt(source, helmOpts{helmSets: appOpts.helmSets})
|
||||
case "helm-set-string":
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSetStrings: appOpts.helmSetStrings})
|
||||
setHelmOpt(source, helmOpts{helmSetStrings: appOpts.helmSetStrings})
|
||||
case "helm-set-file":
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
|
||||
setHelmOpt(source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
|
||||
case "helm-skip-crds":
|
||||
setHelmOpt(&spec.Source, helmOpts{skipCrds: appOpts.helmSkipCrds})
|
||||
setHelmOpt(source, helmOpts{skipCrds: appOpts.helmSkipCrds})
|
||||
case "directory-recurse":
|
||||
if spec.Source.Directory != nil {
|
||||
spec.Source.Directory.Recurse = appOpts.directoryRecurse
|
||||
if source.Directory != nil {
|
||||
source.Directory.Recurse = appOpts.directoryRecurse
|
||||
} else {
|
||||
spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse}
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse}
|
||||
}
|
||||
case "directory-exclude":
|
||||
if spec.Source.Directory != nil {
|
||||
spec.Source.Directory.Exclude = appOpts.directoryExclude
|
||||
if source.Directory != nil {
|
||||
source.Directory.Exclude = appOpts.directoryExclude
|
||||
} else {
|
||||
spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Exclude: appOpts.directoryExclude}
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Exclude: appOpts.directoryExclude}
|
||||
}
|
||||
case "directory-include":
|
||||
if spec.Source.Directory != nil {
|
||||
spec.Source.Directory.Include = appOpts.directoryInclude
|
||||
if source.Directory != nil {
|
||||
source.Directory.Include = appOpts.directoryInclude
|
||||
} else {
|
||||
spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Include: appOpts.directoryInclude}
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Include: appOpts.directoryInclude}
|
||||
}
|
||||
case "config-management-plugin":
|
||||
spec.Source.Plugin = &argoappv1.ApplicationSourcePlugin{Name: appOpts.configManagementPlugin}
|
||||
source.Plugin = &argoappv1.ApplicationSourcePlugin{Name: appOpts.configManagementPlugin}
|
||||
case "dest-name":
|
||||
spec.Destination.Name = appOpts.destName
|
||||
case "dest-server":
|
||||
@@ -209,37 +213,37 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
case "project":
|
||||
spec.Project = appOpts.project
|
||||
case "nameprefix":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{namePrefix: appOpts.namePrefix})
|
||||
setKustomizeOpt(source, kustomizeOpts{namePrefix: appOpts.namePrefix})
|
||||
case "namesuffix":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{nameSuffix: appOpts.nameSuffix})
|
||||
setKustomizeOpt(source, kustomizeOpts{nameSuffix: appOpts.nameSuffix})
|
||||
case "kustomize-image":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{images: appOpts.kustomizeImages})
|
||||
setKustomizeOpt(source, kustomizeOpts{images: appOpts.kustomizeImages})
|
||||
case "kustomize-version":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{version: appOpts.kustomizeVersion})
|
||||
setKustomizeOpt(source, kustomizeOpts{version: appOpts.kustomizeVersion})
|
||||
case "kustomize-common-label":
|
||||
parsedLabels, err := label.Parse(appOpts.kustomizeCommonLabels)
|
||||
errors.CheckError(err)
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{commonLabels: parsedLabels})
|
||||
setKustomizeOpt(source, kustomizeOpts{commonLabels: parsedLabels})
|
||||
case "kustomize-common-annotation":
|
||||
parsedAnnotations, err := label.Parse(appOpts.kustomizeCommonAnnotations)
|
||||
errors.CheckError(err)
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{commonAnnotations: parsedAnnotations})
|
||||
setKustomizeOpt(source, kustomizeOpts{commonAnnotations: parsedAnnotations})
|
||||
case "kustomize-force-common-label":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{forceCommonLabels: appOpts.kustomizeForceCommonLabels})
|
||||
setKustomizeOpt(source, kustomizeOpts{forceCommonLabels: appOpts.kustomizeForceCommonLabels})
|
||||
case "kustomize-force-common-annotation":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{forceCommonAnnotations: appOpts.kustomizeForceCommonAnnotations})
|
||||
setKustomizeOpt(source, kustomizeOpts{forceCommonAnnotations: appOpts.kustomizeForceCommonAnnotations})
|
||||
case "jsonnet-tla-str":
|
||||
setJsonnetOpt(&spec.Source, appOpts.jsonnetTlaStr, false)
|
||||
setJsonnetOpt(source, appOpts.jsonnetTlaStr, false)
|
||||
case "jsonnet-tla-code":
|
||||
setJsonnetOpt(&spec.Source, appOpts.jsonnetTlaCode, true)
|
||||
setJsonnetOpt(source, appOpts.jsonnetTlaCode, true)
|
||||
case "jsonnet-ext-var-str":
|
||||
setJsonnetOptExtVar(&spec.Source, appOpts.jsonnetExtVarStr, false)
|
||||
setJsonnetOptExtVar(source, appOpts.jsonnetExtVarStr, false)
|
||||
case "jsonnet-ext-var-code":
|
||||
setJsonnetOptExtVar(&spec.Source, appOpts.jsonnetExtVarCode, true)
|
||||
setJsonnetOptExtVar(source, appOpts.jsonnetExtVarCode, true)
|
||||
case "jsonnet-libs":
|
||||
setJsonnetOptLibs(&spec.Source, appOpts.jsonnetLibs)
|
||||
setJsonnetOptLibs(source, appOpts.jsonnetLibs)
|
||||
case "plugin-env":
|
||||
setPluginOptEnvs(&spec.Source, appOpts.pluginEnvs)
|
||||
setPluginOptEnvs(source, appOpts.pluginEnvs)
|
||||
case "sync-policy":
|
||||
switch appOpts.syncPolicy {
|
||||
case "none":
|
||||
@@ -296,6 +300,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
log.Fatalf("Invalid sync-retry-limit [%d]", appOpts.retryLimit)
|
||||
}
|
||||
}
|
||||
spec.Source = source
|
||||
})
|
||||
if flags.Changed("auto-prune") {
|
||||
if spec.SyncPolicy == nil || spec.SyncPolicy.Automated == nil {
|
||||
@@ -473,8 +478,9 @@ func SetParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
if len(parameters) == 0 {
|
||||
return
|
||||
}
|
||||
source := app.Spec.GetSource()
|
||||
var sourceType argoappv1.ApplicationSourceType
|
||||
if st, _ := app.Spec.Source.ExplicitType(); st != nil {
|
||||
if st, _ := source.ExplicitType(); st != nil {
|
||||
sourceType = *st
|
||||
} else if app.Status.SourceType != "" {
|
||||
sourceType = app.Status.SourceType
|
||||
@@ -486,8 +492,8 @@ func SetParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
|
||||
switch sourceType {
|
||||
case argoappv1.ApplicationSourceTypeHelm:
|
||||
if app.Spec.Source.Helm == nil {
|
||||
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{}
|
||||
if source.Helm == nil {
|
||||
source.Helm = &argoappv1.ApplicationSourceHelm{}
|
||||
}
|
||||
for _, p := range parameters {
|
||||
newParam, err := argoappv1.NewHelmParameter(p, false)
|
||||
@@ -495,7 +501,7 @@ func SetParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
app.Spec.Source.Helm.AddParameter(*newParam)
|
||||
source.Helm.AddParameter(*newParam)
|
||||
}
|
||||
default:
|
||||
log.Fatalf("Parameters can only be set against Helm applications")
|
||||
@@ -580,6 +586,9 @@ func constructAppsBaseOnName(appName string, labels, annotations, args []string,
|
||||
Name: appName,
|
||||
Namespace: appNs,
|
||||
},
|
||||
Spec: argoappv1.ApplicationSpec{
|
||||
Source: &argoappv1.ApplicationSource{},
|
||||
},
|
||||
}
|
||||
SetAppSpecOptions(flags, &app.Spec, &appOpts)
|
||||
SetParameterOverrides(app, appOpts.Parameters)
|
||||
|
||||
@@ -149,7 +149,9 @@ func (f *appOptionsFixture) SetFlag(key, value string) error {
|
||||
|
||||
func newAppOptionsFixture() *appOptionsFixture {
|
||||
fixture := &appOptionsFixture{
|
||||
spec: &v1alpha1.ApplicationSpec{},
|
||||
spec: &v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
command: &cobra.Command{},
|
||||
options: &AppOptions{},
|
||||
}
|
||||
|
||||
@@ -61,6 +61,6 @@ func readAppset(yml []byte, appsets *[]*argoprojiov1alpha1.ApplicationSet) error
|
||||
*appsets = append(*appsets, &appset)
|
||||
|
||||
}
|
||||
|
||||
return fmt.Errorf("error reading app set: %w", err)
|
||||
// we reach here if there is no error found while reading the Application Set
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -22,6 +22,8 @@ type RepoOptions struct {
|
||||
GithubAppPrivateKeyPath string
|
||||
GitHubAppEnterpriseBaseURL string
|
||||
Proxy string
|
||||
GCPServiceAccountKeyPath string
|
||||
ForceHttpBasicAuth bool
|
||||
}
|
||||
|
||||
func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
|
||||
@@ -42,4 +44,6 @@ func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
|
||||
command.Flags().StringVar(&opts.GithubAppPrivateKeyPath, "github-app-private-key-path", "", "private key of the GitHub Application")
|
||||
command.Flags().StringVar(&opts.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3")
|
||||
command.Flags().StringVar(&opts.Proxy, "proxy", "", "use proxy to access repository")
|
||||
command.Flags().StringVar(&opts.GCPServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
|
||||
command.Flags().BoolVar(&opts.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force use of basic auth when connecting repository via HTTP")
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package apiclient
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
apiclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
@@ -317,6 +318,7 @@ func (m *ManifestResponse) GetSourceType() string {
|
||||
|
||||
type RepositoryResponse struct {
|
||||
IsSupported bool `protobuf:"varint,1,opt,name=isSupported,proto3" json:"isSupported,omitempty"`
|
||||
IsDiscoveryEnabled bool `protobuf:"varint,2,opt,name=isDiscoveryEnabled,proto3" json:"isDiscoveryEnabled,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
@@ -362,6 +364,62 @@ func (m *RepositoryResponse) GetIsSupported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RepositoryResponse) GetIsDiscoveryEnabled() bool {
|
||||
if m != nil {
|
||||
return m.IsDiscoveryEnabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ParametersAnnouncementResponse contains a list of announcements. This list represents all the parameters which a CMP
|
||||
// is able to accept.
|
||||
type ParametersAnnouncementResponse struct {
|
||||
ParameterAnnouncements []*apiclient.ParameterAnnouncement `protobuf:"bytes,1,rep,name=parameterAnnouncements,proto3" json:"parameterAnnouncements,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) Reset() { *m = ParametersAnnouncementResponse{} }
|
||||
func (m *ParametersAnnouncementResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ParametersAnnouncementResponse) ProtoMessage() {}
|
||||
func (*ParametersAnnouncementResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b21875a7079a06ed, []int{5}
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ParametersAnnouncementResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ParametersAnnouncementResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ParametersAnnouncementResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ParametersAnnouncementResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ParametersAnnouncementResponse) GetParameterAnnouncements() []*apiclient.ParameterAnnouncement {
|
||||
if m != nil {
|
||||
return m.ParameterAnnouncements
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
@@ -373,7 +431,7 @@ func (m *File) Reset() { *m = File{} }
|
||||
func (m *File) String() string { return proto.CompactTextString(m) }
|
||||
func (*File) ProtoMessage() {}
|
||||
func (*File) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b21875a7079a06ed, []int{5}
|
||||
return fileDescriptor_b21875a7079a06ed, []int{6}
|
||||
}
|
||||
func (m *File) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -415,44 +473,50 @@ func init() {
|
||||
proto.RegisterType((*EnvEntry)(nil), "plugin.EnvEntry")
|
||||
proto.RegisterType((*ManifestResponse)(nil), "plugin.ManifestResponse")
|
||||
proto.RegisterType((*RepositoryResponse)(nil), "plugin.RepositoryResponse")
|
||||
proto.RegisterType((*ParametersAnnouncementResponse)(nil), "plugin.ParametersAnnouncementResponse")
|
||||
proto.RegisterType((*File)(nil), "plugin.File")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("cmpserver/plugin/plugin.proto", fileDescriptor_b21875a7079a06ed) }
|
||||
|
||||
var fileDescriptor_b21875a7079a06ed = []byte{
|
||||
// 483 bytes of a gzipped FileDescriptorProto (old) / 576 bytes (new)
[raw gzipped descriptor byte literals omitted: compiler-generated protobuf output, regenerated after adding the ParametersAnnouncementResponse message]
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -472,6 +536,8 @@ type ConfigManagementPluginServiceClient interface {
|
||||
GenerateManifest(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GenerateManifestClient, error)
|
||||
// MatchRepository returns whether or not the given application is supported by the plugin
|
||||
MatchRepository(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_MatchRepositoryClient, error)
|
||||
// GetParametersAnnouncement gets a list of parameter announcements for the given app
|
||||
GetParametersAnnouncement(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GetParametersAnnouncementClient, error)
|
||||
}
|
||||
|
||||
type configManagementPluginServiceClient struct {
|
||||
@@ -550,6 +616,40 @@ func (x *configManagementPluginServiceMatchRepositoryClient) CloseAndRecv() (*Re
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *configManagementPluginServiceClient) GetParametersAnnouncement(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GetParametersAnnouncementClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_ConfigManagementPluginService_serviceDesc.Streams[2], "/plugin.ConfigManagementPluginService/GetParametersAnnouncement", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &configManagementPluginServiceGetParametersAnnouncementClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type ConfigManagementPluginService_GetParametersAnnouncementClient interface {
|
||||
Send(*AppStreamRequest) error
|
||||
CloseAndRecv() (*ParametersAnnouncementResponse, error)
|
||||
grpc.ClientStream
|
||||
}
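// Illustrative usage sketch (not part of the generated code): a caller drives this
// client-streaming RPC by sending every AppStreamRequest chunk and then calling
// CloseAndRecv to obtain the announcement list. "client" and "requests" are assumed
// to exist in the caller's scope.
//
//	stream, err := client.GetParametersAnnouncement(ctx)
//	if err != nil {
//		return err
//	}
//	for _, req := range requests {
//		if err := stream.Send(req); err != nil {
//			return err
//		}
//	}
//	resp, err := stream.CloseAndRecv()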
|
||||
|
||||
type configManagementPluginServiceGetParametersAnnouncementClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *configManagementPluginServiceGetParametersAnnouncementClient) Send(m *AppStreamRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *configManagementPluginServiceGetParametersAnnouncementClient) CloseAndRecv() (*ParametersAnnouncementResponse, error) {
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := new(ParametersAnnouncementResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ConfigManagementPluginServiceServer is the server API for ConfigManagementPluginService service.
|
||||
type ConfigManagementPluginServiceServer interface {
|
||||
// GenerateManifests receive a stream containing a tgz archive with all required files necessary
|
||||
@@ -557,6 +657,8 @@ type ConfigManagementPluginServiceServer interface {
|
||||
GenerateManifest(ConfigManagementPluginService_GenerateManifestServer) error
|
||||
// MatchRepository returns whether or not the given application is supported by the plugin
|
||||
MatchRepository(ConfigManagementPluginService_MatchRepositoryServer) error
|
||||
// GetParametersAnnouncement gets a list of parameter announcements for the given app
|
||||
GetParametersAnnouncement(ConfigManagementPluginService_GetParametersAnnouncementServer) error
|
||||
}
|
||||
|
||||
// UnimplementedConfigManagementPluginServiceServer can be embedded to have forward compatible implementations.
|
||||
@@ -569,6 +671,9 @@ func (*UnimplementedConfigManagementPluginServiceServer) GenerateManifest(srv Co
|
||||
func (*UnimplementedConfigManagementPluginServiceServer) MatchRepository(srv ConfigManagementPluginService_MatchRepositoryServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method MatchRepository not implemented")
|
||||
}
|
||||
func (*UnimplementedConfigManagementPluginServiceServer) GetParametersAnnouncement(srv ConfigManagementPluginService_GetParametersAnnouncementServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method GetParametersAnnouncement not implemented")
|
||||
}
|
||||
|
||||
func RegisterConfigManagementPluginServiceServer(s *grpc.Server, srv ConfigManagementPluginServiceServer) {
|
||||
s.RegisterService(&_ConfigManagementPluginService_serviceDesc, srv)
|
||||
@@ -626,6 +731,32 @@ func (x *configManagementPluginServiceMatchRepositoryServer) Recv() (*AppStreamR
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _ConfigManagementPluginService_GetParametersAnnouncement_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(ConfigManagementPluginServiceServer).GetParametersAnnouncement(&configManagementPluginServiceGetParametersAnnouncementServer{stream})
|
||||
}
|
||||
|
||||
type ConfigManagementPluginService_GetParametersAnnouncementServer interface {
|
||||
SendAndClose(*ParametersAnnouncementResponse) error
|
||||
Recv() (*AppStreamRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type configManagementPluginServiceGetParametersAnnouncementServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *configManagementPluginServiceGetParametersAnnouncementServer) SendAndClose(m *ParametersAnnouncementResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *configManagementPluginServiceGetParametersAnnouncementServer) Recv() (*AppStreamRequest, error) {
|
||||
m := new(AppStreamRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
var _ConfigManagementPluginService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "plugin.ConfigManagementPluginService",
|
||||
HandlerType: (*ConfigManagementPluginServiceServer)(nil),
|
||||
@@ -641,6 +772,11 @@ var _ConfigManagementPluginService_serviceDesc = grpc.ServiceDesc{
|
||||
Handler: _ConfigManagementPluginService_MatchRepository_Handler,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "GetParametersAnnouncement",
|
||||
Handler: _ConfigManagementPluginService_GetParametersAnnouncement_Handler,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "cmpserver/plugin/plugin.proto",
|
||||
}
|
||||
@@ -898,6 +1034,16 @@ func (m *RepositoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.IsDiscoveryEnabled {
|
||||
i--
|
||||
if m.IsDiscoveryEnabled {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.IsSupported {
|
||||
i--
|
||||
if m.IsSupported {
|
||||
@@ -911,6 +1057,47 @@ func (m *RepositoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.ParameterAnnouncements) > 0 {
|
||||
for iNdEx := len(m.ParameterAnnouncements) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.ParameterAnnouncements[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *File) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@@ -1079,6 +1266,27 @@ func (m *RepositoryResponse) Size() (n int) {
|
||||
if m.IsSupported {
|
||||
n += 2
|
||||
}
|
||||
if m.IsDiscoveryEnabled {
|
||||
n += 2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.ParameterAnnouncements) > 0 {
|
||||
for _, e := range m.ParameterAnnouncements {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
@@ -1707,6 +1915,111 @@ func (m *RepositoryResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
}
|
||||
m.IsSupported = bool(v != 0)
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field IsDiscoveryEnabled", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.IsDiscoveryEnabled = bool(v != 0)
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ParametersAnnouncementResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ParametersAnnouncementResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ParameterAnnouncements", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ParameterAnnouncements = append(m.ParameterAnnouncements, &apiclient.ParameterAnnouncement{})
|
||||
if err := m.ParameterAnnouncements[len(m.ParameterAnnouncements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
configUtil "github.com/argoproj/argo-cd/v2/util/config"
|
||||
)
|
||||
|
||||
@@ -21,10 +22,11 @@ type PluginConfig struct {
|
||||
}
|
||||
|
||||
type PluginConfigSpec struct {
|
||||
Version string `json:"version"`
|
||||
Init Command `json:"init,omitempty"`
|
||||
Generate Command `json:"generate"`
|
||||
Discover Discover `json:"discover"`
|
||||
Version string `json:"version"`
|
||||
Init Command `json:"init,omitempty"`
|
||||
Generate Command `json:"generate"`
|
||||
Discover Discover `json:"discover"`
|
||||
Parameters Parameters `yaml:"parameters"`
|
||||
}
|
||||
|
||||
// Discover holds find and fileName
|
||||
@@ -45,6 +47,17 @@ type Find struct {
|
||||
Glob string `json:"glob"`
|
||||
}
|
||||
|
||||
// Parameters holds static and dynamic configurations
|
||||
type Parameters struct {
|
||||
Static []*apiclient.ParameterAnnouncement `yaml:"static"`
|
||||
Dynamic Command `yaml:"dynamic"`
|
||||
}
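// Illustrative sketch only: a Parameters value equivalent to declaring one static
// announcement and one dynamic command in plugin.yaml (the names here are made up).
//
//	params := Parameters{
//		Static:  []*apiclient.ParameterAnnouncement{{Name: "example-param"}},
//		Dynamic: Command{Command: []string{"echo"}, Args: []string{`[{"name": "example-dynamic"}]`}},
//	}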
|
||||
|
||||
// Dynamic holds the dynamic announcements for CMPs
|
||||
type Dynamic struct {
|
||||
Command
|
||||
}
|
||||
|
||||
func ReadPluginConfig(filePath string) (*PluginConfig, error) {
|
||||
path := fmt.Sprintf("%s/%s", strings.TrimRight(filePath, "/"), common.PluginConfigFileName)
|
||||
|
||||
@@ -71,9 +84,7 @@ func ValidatePluginConfig(config PluginConfig) error {
|
||||
if len(config.Spec.Generate.Command) == 0 {
|
||||
return fmt.Errorf("invalid plugin configuration file. spec.generate command should be non-empty")
|
||||
}
|
||||
if config.Spec.Discover.Find.Glob == "" && len(config.Spec.Discover.Find.Command.Command) == 0 && config.Spec.Discover.FileName == "" {
|
||||
return fmt.Errorf("invalid plugin configuration file. atleast one of discover.find.command or discover.find.glob or discover.fineName should be non-empty")
|
||||
}
|
||||
// discovery field is optional as apps can now specify plugin names directly
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -3,17 +3,22 @@ package plugin
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/argoproj/pkg/rand"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmpserver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
repoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/buffered_context"
|
||||
"github.com/argoproj/argo-cd/v2/util/cmp"
|
||||
"github.com/argoproj/argo-cd/v2/util/io/files"
|
||||
@@ -21,8 +26,6 @@ import (
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
"github.com/mattn/go-zglob"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmpserver/apiclient"
|
||||
)
|
||||
|
||||
// cmpTimeoutBuffer is the amount of time before the request deadline to timeout server-side work. It makes sure there's
|
||||
@@ -45,15 +48,14 @@ func NewService(initConstants CMPServerInitConstants) *Service {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) Init() error {
|
||||
workDir := common.GetCMPWorkDir()
|
||||
func (s *Service) Init(workDir string) error {
|
||||
err := os.RemoveAll(workDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error removing workdir %q: %s", workDir, err)
|
||||
return fmt.Errorf("error removing workdir %q: %w", workDir, err)
|
||||
}
|
||||
err = os.MkdirAll(workDir, 0700)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating workdir %q: %s", workDir, err)
|
||||
return fmt.Errorf("error creating workdir %q: %w", workDir, err)
|
||||
}
|
||||
return nil
|
||||
}
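// Typical call site (see the cmp-server wiring further down in this change):
//
//	err := pluginService.Init(common.GetCMPWorkDir())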
|
||||
@@ -73,9 +75,8 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
|
||||
}
|
||||
logCtx := log.WithFields(log.Fields{"execID": execId})
|
||||
|
||||
// log in a way we can copy-and-paste into a terminal
|
||||
args := strings.Join(cmd.Args, " ")
|
||||
logCtx.WithFields(log.Fields{"dir": cmd.Dir}).Info(args)
|
||||
argsToLog := getCommandArgsToLog(cmd)
|
||||
logCtx.WithFields(log.Fields{"dir": cmd.Dir}).Info(argsToLog)
|
||||
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
@@ -106,7 +107,7 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
|
||||
logCtx.WithFields(log.Fields{"duration": duration}).Debug(output)
|
||||
|
||||
if err != nil {
|
||||
err := newCmdError(args, errors.New(err.Error()), strings.TrimSpace(stderr.String()))
|
||||
err := newCmdError(argsToLog, errors.New(err.Error()), strings.TrimSpace(stderr.String()))
|
||||
logCtx.Error(err.Error())
|
||||
return strings.TrimSuffix(output, "\n"), err
|
||||
}
|
||||
@@ -114,6 +115,28 @@ func runCommand(ctx context.Context, command Command, path string, env []string)
|
||||
return strings.TrimSuffix(output, "\n"), nil
|
||||
}
|
||||
|
||||
// getCommandArgsToLog represents the given command in a way that we can copy-and-paste into a terminal
|
||||
func getCommandArgsToLog(cmd *exec.Cmd) string {
|
||||
var argsToLog []string
|
||||
for _, arg := range cmd.Args {
|
||||
containsSpace := false
|
||||
for _, r := range arg {
|
||||
if unicode.IsSpace(r) {
|
||||
containsSpace = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if containsSpace {
|
||||
// add quotes and escape any internal quotes
|
||||
argsToLog = append(argsToLog, strconv.Quote(arg))
|
||||
} else {
|
||||
argsToLog = append(argsToLog, arg)
|
||||
}
|
||||
}
|
||||
args := strings.Join(argsToLog, " ")
|
||||
return args
|
||||
}
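// Illustrative example of the quoting behavior: an argument containing a space is
// quoted and its inner quotes escaped, so the logged line can be pasted back into a shell.
//
//	cmd := exec.Command("sh", "-c", `echo "hello world"`)
//	getCommandArgsToLog(cmd) // => sh -c "echo \"hello world\""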
|
||||
|
||||
type CmdError struct {
|
||||
Args string
|
||||
Stderr string
|
||||
@@ -143,24 +166,50 @@ func environ(envVars []*apiclient.EnvEntry) []string {
|
||||
return environ
|
||||
}
|
||||
|
||||
// getTempDirMustCleanup creates a temporary directory and returns a cleanup function.
|
||||
func getTempDirMustCleanup(baseDir string) (workDir string, cleanup func(), err error) {
|
||||
workDir, err = files.CreateTempDir(baseDir)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("error creating temp dir: %w", err)
|
||||
}
|
||||
cleanup = func() {
|
||||
if err := os.RemoveAll(workDir); err != nil {
|
||||
log.WithFields(map[string]interface{}{
|
||||
common.SecurityField: common.SecurityHigh,
|
||||
common.SecurityCWEField: 459,
|
||||
}).Errorf("Failed to clean up temp directory: %s", err)
|
||||
}
|
||||
}
|
||||
return workDir, cleanup, nil
|
||||
}
|
||||
|
||||
type Stream interface {
|
||||
Recv() (*apiclient.AppStreamRequest, error)
|
||||
Context() context.Context
|
||||
}
|
||||
|
||||
type GenerateManifestStream interface {
|
||||
Stream
|
||||
SendAndClose(response *apiclient.ManifestResponse) error
|
||||
}
|
||||
|
||||
// GenerateManifest runs generate command from plugin config file and returns generated manifest files
|
||||
func (s *Service) GenerateManifest(stream apiclient.ConfigManagementPluginService_GenerateManifestServer) error {
|
||||
return s.generateManifestGeneric(stream)
|
||||
}
|
||||
|
||||
func (s *Service) generateManifestGeneric(stream GenerateManifestStream) error {
|
||||
ctx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer)
|
||||
defer cancel()
|
||||
workDir, err := files.CreateTempDir(common.GetCMPWorkDir())
|
||||
workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating temp dir: %s", err)
|
||||
return fmt.Errorf("error creating workdir for manifest generation: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(workDir); err != nil {
|
||||
// we panic here as the workDir may contain sensitive information
|
||||
panic(fmt.Sprintf("error removing generate manifest workdir: %s", err))
|
||||
}
|
||||
}()
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(ctx, stream, workDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("generate manifest error receiving stream: %s", err)
|
||||
return fmt.Errorf("generate manifest error receiving stream: %w", err)
|
||||
}
|
||||
|
||||
appPath := filepath.Clean(filepath.Join(workDir, metadata.AppRelPath))
|
||||
@@ -169,11 +218,11 @@ func (s *Service) GenerateManifest(stream apiclient.ConfigManagementPluginServic
|
||||
}
|
||||
response, err := s.generateManifest(ctx, appPath, metadata.GetEnv())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating manifests: %s", err)
|
||||
return fmt.Errorf("error generating manifests: %w", err)
|
||||
}
|
||||
err = stream.SendAndClose(response)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending manifest response: %s", err)
|
||||
return fmt.Errorf("error sending manifest response: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -203,6 +252,11 @@ func (s *Service) generateManifest(ctx context.Context, appDir string, envEntrie
|
||||
|
||||
manifests, err := kube.SplitYAMLToString([]byte(out))
|
||||
if err != nil {
|
||||
sanitizedManifests := manifests
|
||||
if len(sanitizedManifests) > 1000 {
|
||||
sanitizedManifests = manifests[:1000]
|
||||
}
|
||||
log.Debugf("Failed to split generated manifests. Beginning of generated manifests: %q", sanitizedManifests)
|
||||
return &apiclient.ManifestResponse{}, err
|
||||
}
|
||||
|
||||
@@ -211,58 +265,63 @@ func (s *Service) generateManifest(ctx context.Context, appDir string, envEntrie
|
||||
}, err
|
||||
}
|
||||
|
||||
type MatchRepositoryStream interface {
|
||||
Stream
|
||||
SendAndClose(response *apiclient.RepositoryResponse) error
|
||||
}
|
||||
|
||||
// MatchRepository receives the application stream and checks whether
|
||||
// its repository type is supported by the config management plugin
|
||||
// server.
|
||||
//The checks are implemented in the following order:
|
||||
// 1. If spec.Discover.FileName is provided it finds for a name match in Applications files
|
||||
// 2. If spec.Discover.Find.Glob is provided if finds for a glob match in Applications files
|
||||
// 3. Otherwise it runs the spec.Discover.Find.Command
|
||||
// The checks are implemented in the following order:
|
||||
// 1. If spec.Discover.FileName is provided, it looks for a filename match in the Application's files
|
||||
// 2. If spec.Discover.Find.Glob is provided, it looks for a glob match in the Application's files
|
||||
// 3. Otherwise it runs the spec.Discover.Find.Command
|
||||
func (s *Service) MatchRepository(stream apiclient.ConfigManagementPluginService_MatchRepositoryServer) error {
|
||||
return s.matchRepositoryGeneric(stream)
|
||||
}
|
||||
|
||||
func (s *Service) matchRepositoryGeneric(stream MatchRepositoryStream) error {
|
||||
bufferedCtx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer)
|
||||
defer cancel()
|
||||
|
||||
workDir, err := files.CreateTempDir(common.GetCMPWorkDir())
|
||||
workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating match repository workdir: %s", err)
|
||||
return fmt.Errorf("error creating workdir for repository matching: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(workDir); err != nil {
|
||||
// we panic here as the workDir may contain sensitive information
|
||||
panic(fmt.Sprintf("error removing match repository workdir: %s", err))
|
||||
}
|
||||
}()
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("match repository error receiving stream: %s", err)
|
||||
return fmt.Errorf("match repository error receiving stream: %w", err)
|
||||
}
|
||||
|
||||
isSupported, err := s.matchRepository(bufferedCtx, workDir, metadata.GetEnv())
|
||||
isSupported, isDiscoveryEnabled, err := s.matchRepository(bufferedCtx, workDir, metadata.GetEnv())
|
||||
if err != nil {
|
||||
return fmt.Errorf("match repository error: %s", err)
|
||||
return fmt.Errorf("match repository error: %w", err)
|
||||
}
|
||||
repoResponse := &apiclient.RepositoryResponse{IsSupported: isSupported}
|
||||
repoResponse := &apiclient.RepositoryResponse{IsSupported: isSupported, IsDiscoveryEnabled: isDiscoveryEnabled}
|
||||
|
||||
err = stream.SendAndClose(repoResponse)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending match repository response: %s", err)
|
||||
return fmt.Errorf("error sending match repository response: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) matchRepository(ctx context.Context, workdir string, envEntries []*apiclient.EnvEntry) (bool, error) {
|
||||
func (s *Service) matchRepository(ctx context.Context, workdir string, envEntries []*apiclient.EnvEntry) (isSupported bool, isDiscoveryEnabled bool, err error) {
|
||||
config := s.initConstants.PluginConfig
|
||||
|
||||
if config.Spec.Discover.FileName != "" {
|
||||
log.Debugf("config.Spec.Discover.FileName is provided")
|
||||
pattern := filepath.Join(workdir, config.Spec.Discover.FileName)
|
||||
matches, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
e := fmt.Errorf("error finding filename match for pattern %q: %s", pattern, err)
|
||||
e := fmt.Errorf("error finding filename match for pattern %q: %w", pattern, err)
|
||||
log.Debug(e)
|
||||
return false, e
|
||||
return false, true, e
|
||||
}
|
||||
return len(matches) > 0, nil
|
||||
return len(matches) > 0, true, nil
|
||||
}
|
||||
|
||||
if config.Spec.Discover.Find.Glob != "" {
|
||||
@@ -272,27 +331,86 @@ func (s *Service) matchRepository(ctx context.Context, workdir string, envEntrie
|
||||
// https://github.com/golang/go/issues/11862
|
||||
matches, err := zglob.Glob(pattern)
|
||||
if err != nil {
|
||||
e := fmt.Errorf("error finding glob match for pattern %q: %s", pattern, err)
|
||||
e := fmt.Errorf("error finding glob match for pattern %q: %w", pattern, err)
|
||||
log.Debug(e)
|
||||
return false, e
|
||||
return false, true, e
|
||||
}
|
||||
|
||||
if len(matches) > 0 {
|
||||
return true, nil
|
||||
return len(matches) > 0, true, nil
|
||||
}
|
||||
|
||||
if len(config.Spec.Discover.Find.Command.Command) > 0 {
|
||||
log.Debugf("Going to try runCommand.")
|
||||
env := append(os.Environ(), environ(envEntries)...)
|
||||
find, err := runCommand(ctx, config.Spec.Discover.Find.Command, workdir, env)
|
||||
if err != nil {
|
||||
return false, true, fmt.Errorf("error running find command: %w", err)
|
||||
}
|
||||
return false, nil
|
||||
return find != "", true, nil
|
||||
}
|
||||
|
||||
log.Debugf("Going to try runCommand.")
|
||||
env := append(os.Environ(), environ(envEntries)...)
|
||||
|
||||
find, err := runCommand(ctx, config.Spec.Discover.Find.Command, workdir, env)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error running find command: %s", err)
|
||||
}
|
||||
|
||||
if find != "" {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// ParametersAnnouncementStream defines an interface able to send/receive a stream of parameter announcements.
|
||||
type ParametersAnnouncementStream interface {
|
||||
Stream
|
||||
SendAndClose(response *apiclient.ParametersAnnouncementResponse) error
|
||||
}
|
||||
|
||||
// GetParametersAnnouncement gets parameter announcements for a given Application and repo contents.
|
||||
func (s *Service) GetParametersAnnouncement(stream apiclient.ConfigManagementPluginService_GetParametersAnnouncementServer) error {
|
||||
bufferedCtx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer)
|
||||
defer cancel()
|
||||
|
||||
workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating workdir for generating parameter announcements: %w", err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parameters announcement error receiving stream: %w", err)
|
||||
}
|
||||
appPath := filepath.Clean(filepath.Join(workDir, metadata.AppRelPath))
|
||||
if !strings.HasPrefix(appPath, workDir) {
|
||||
return fmt.Errorf("illegal appPath: out of workDir bound")
|
||||
}
|
||||
|
||||
repoResponse, err := getParametersAnnouncement(bufferedCtx, appPath, s.initConstants.PluginConfig.Spec.Parameters.Static, s.initConstants.PluginConfig.Spec.Parameters.Dynamic)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get parameters announcement error: %w", err)
|
||||
}
|
||||
|
||||
err = stream.SendAndClose(repoResponse)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending parameters announcement response: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getParametersAnnouncement(ctx context.Context, appDir string, announcements []*repoclient.ParameterAnnouncement, command Command) (*apiclient.ParametersAnnouncementResponse, error) {
|
||||
augmentedAnnouncements := announcements
|
||||
|
||||
if len(command.Command) > 0 {
|
||||
stdout, err := runCommand(ctx, command, appDir, os.Environ())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error executing dynamic parameter output command: %w", err)
|
||||
}
|
||||
|
||||
var dynamicParamAnnouncements []*repoclient.ParameterAnnouncement
|
||||
err = json.Unmarshal([]byte(stdout), &dynamicParamAnnouncements)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling dynamic parameter output into ParametersAnnouncementResponse: %w", err)
|
||||
}
|
||||
|
||||
// dynamic announcements go first so that the static ones, appended later, take precedence.
|
||||
augmentedAnnouncements = append(dynamicParamAnnouncements, announcements...)
|
||||
}
|
||||
|
||||
repoResponse := &apiclient.ParametersAnnouncementResponse{
|
||||
ParameterAnnouncements: augmentedAnnouncements,
|
||||
}
|
||||
return repoResponse, nil
|
||||
}
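// Illustrative sketch of the merge order above: dynamic announcements come first and
// static ones last (this mirrors Test_getParametersAnnouncement_static_and_dynamic below).
//
//	res, _ := getParametersAnnouncement(ctx, appDir,
//		[]*repoclient.ParameterAnnouncement{{Name: "static-a"}},
//		Command{Command: []string{"echo"}, Args: []string{`[{"name": "dynamic-a"}]`}})
//	// res.ParameterAnnouncements => [{Name: "dynamic-a"}, {Name: "static-a"}]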
|
||||
|
||||
@@ -3,6 +3,8 @@ option go_package = "github.com/argoproj/argo-cd/v2/cmpserver/apiclient";
|
||||
|
||||
package plugin;
|
||||
|
||||
import "github.com/argoproj/argo-cd/v2/reposerver/repository/repository.proto";
|
||||
|
||||
// AppStreamRequest is the request object used to send the application's
|
||||
// files over a stream.
|
||||
message AppStreamRequest {
|
||||
@@ -42,6 +44,13 @@ message ManifestResponse {
|
||||
|
||||
message RepositoryResponse {
|
||||
bool isSupported = 1;
|
||||
bool isDiscoveryEnabled = 2;
|
||||
}
|
||||
|
||||
// ParametersAnnouncementResponse contains a list of announcements. This list represents all the parameters which a CMP
|
||||
// is able to accept.
|
||||
message ParametersAnnouncementResponse {
|
||||
repeated repository.ParameterAnnouncement parameterAnnouncements = 1;
|
||||
}
|
||||
|
||||
message File {
|
||||
@@ -58,4 +67,8 @@ service ConfigManagementPluginService {
|
||||
// MatchRepository returns whether or not the given application is supported by the plugin
|
||||
rpc MatchRepository(stream AppStreamRequest) returns (RepositoryResponse) {
|
||||
}
|
||||
|
||||
// GetParametersAnnouncement gets a list of parameter announcements for the given app
|
||||
rpc GetParametersAnnouncement(stream AppStreamRequest) returns (ParametersAnnouncementResponse) {
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,17 +1,28 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"gopkg.in/yaml.v2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmpserver/apiclient"
|
||||
repoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/test"
|
||||
"github.com/argoproj/argo-cd/v2/util/cmp"
|
||||
"github.com/argoproj/argo-cd/v2/util/tgzstream"
|
||||
)
|
||||
|
||||
func newService(configFilePath string) (*Service, error) {
|
||||
@@ -30,6 +41,11 @@ func newService(configFilePath string) (*Service, error) {
|
||||
return service, nil
|
||||
}
|
||||
|
||||
func (s *Service) WithGenerateCommand(command Command) *Service {
|
||||
s.initConstants.PluginConfig.Spec.Generate = command
|
||||
return s
|
||||
}
|
||||
|
||||
type pluginOpt func(*CMPServerInitConstants)
|
||||
|
||||
func withDiscover(d Discover) pluginOpt {
|
||||
@@ -84,11 +100,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by filename if file not found", func(t *testing.T) {
|
||||
// given
|
||||
@@ -98,11 +115,25 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match a pattern with a syntax error", func(t *testing.T) {
|
||||
// given
|
||||
d := Discover{
|
||||
FileName: "[",
|
||||
}
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
_, _, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.ErrorContains(t, err, "syntax error")
|
||||
})
|
||||
t.Run("will match plugin by glob", func(t *testing.T) {
|
||||
// given
|
||||
@@ -114,11 +145,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by glob if not found", func(t *testing.T) {
|
||||
// given
|
||||
@@ -130,11 +162,27 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will throw an error for a bad pattern", func(t *testing.T) {
|
||||
// given
|
||||
d := Discover{
|
||||
Find: Find{
|
||||
Glob: "does-not-exist",
|
||||
},
|
||||
}
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
_, _, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.ErrorContains(t, err, "error finding glob match for pattern")
|
||||
})
|
||||
t.Run("will match plugin by command when returns any output", func(t *testing.T) {
|
||||
// given
|
||||
@@ -148,11 +196,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by command when returns no output", func(t *testing.T) {
|
||||
// given
|
||||
@@ -166,11 +215,11 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will match plugin because env var defined", func(t *testing.T) {
|
||||
// given
|
||||
@@ -184,11 +233,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin because no env var defined", func(t *testing.T) {
|
||||
// given
|
||||
@@ -203,11 +253,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by command when command fails", func(t *testing.T) {
|
||||
// given
|
||||
@@ -221,11 +272,25 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin as discovery is not set", func(t *testing.T) {
|
||||
// given
|
||||
d := Discover{}
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.False(t, discovery)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -238,17 +303,49 @@ func Test_Negative_ConfigFile_DoesnotExist(t *testing.T) {
|
||||
|
||||
func TestGenerateManifest(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
|
||||
t.Run("successful generate", func(t *testing.T) {
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
res1, err := service.generateManifest(context.Background(), "testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res1)
|
||||
|
||||
expectedOutput := "{\"apiVersion\":\"v1\",\"data\":{\"foo\":\"bar\"},\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"my-map\"}}"
|
||||
if res1 != nil {
|
||||
require.Equal(t, expectedOutput, res1.Manifests[0])
|
||||
}
|
||||
})
|
||||
t.Run("bad generate command", func(t *testing.T) {
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
service.WithGenerateCommand(Command{Command: []string{"bad-command"}})
|
||||
|
||||
res, err := service.generateManifest(context.Background(), "testdata/kustomize", nil)
|
||||
assert.ErrorContains(t, err, "executable file not found")
|
||||
assert.Nil(t, res.Manifests)
|
||||
})
|
||||
t.Run("bad yaml output", func(t *testing.T) {
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
service.WithGenerateCommand(Command{Command: []string{"echo", "invalid yaml: }"}})
|
||||
|
||||
res, err := service.generateManifest(context.Background(), "testdata/kustomize", nil)
|
||||
assert.ErrorContains(t, err, "failed to unmarshal manifest")
|
||||
assert.Nil(t, res.Manifests)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGenerateManifest_deadline_exceeded(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
res1, err := service.generateManifest(context.Background(), "", nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res1)
|
||||
|
||||
expectedOutput := "{\"apiVersion\":\"v1\",\"data\":{\"foo\":\"bar\"},\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"my-map\"}}"
|
||||
if res1 != nil {
|
||||
require.Equal(t, expectedOutput, res1.Manifests[0])
|
||||
}
|
||||
expiredCtx, cancel := context.WithTimeout(context.Background(), time.Second*0)
|
||||
defer cancel()
|
||||
_, err = service.generateManifest(expiredCtx, "", nil)
|
||||
assert.ErrorContains(t, err, "context deadline exceeded")
|
||||
}
|
||||
|
||||
// TestRunCommandContextTimeout makes sure the command dies at timeout rather than sleeping past the timeout.
|
||||
@@ -266,3 +363,415 @@ func TestRunCommandContextTimeout(t *testing.T) {
|
||||
assert.Error(t, err) // The command should time out, causing an error.
|
||||
assert.Less(t, after.Sub(before), 1*time.Second)
|
||||
}
|
||||
|
||||
func TestRunCommandEmptyCommand(t *testing.T) {
|
||||
_, err := runCommand(context.Background(), Command{}, "", nil)
|
||||
assert.ErrorContains(t, err, "Command is empty")
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_empty_command(t *testing.T) {
|
||||
staticYAML := `
|
||||
- name: static-a
|
||||
- name: static-b
|
||||
`
|
||||
static := &[]*repoclient.ParameterAnnouncement{}
|
||||
err := yaml.Unmarshal([]byte(staticYAML), static)
|
||||
require.NoError(t, err)
|
||||
command := Command{
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[]`},
|
||||
}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []*repoclient.ParameterAnnouncement{{Name: "static-a"}, {Name: "static-b"}}, res.ParameterAnnouncements)
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_no_command(t *testing.T) {
|
||||
staticYAML := `
|
||||
- name: static-a
|
||||
- name: static-b
|
||||
`
|
||||
static := &[]*repoclient.ParameterAnnouncement{}
|
||||
err := yaml.Unmarshal([]byte(staticYAML), static)
|
||||
require.NoError(t, err)
|
||||
command := Command{}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []*repoclient.ParameterAnnouncement{{Name: "static-a"}, {Name: "static-b"}}, res.ParameterAnnouncements)
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_static_and_dynamic(t *testing.T) {
|
||||
staticYAML := `
|
||||
- name: static-a
|
||||
- name: static-b
|
||||
`
|
||||
static := &[]*repoclient.ParameterAnnouncement{}
|
||||
err := yaml.Unmarshal([]byte(staticYAML), static)
|
||||
require.NoError(t, err)
|
||||
command := Command{
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[{"name": "dynamic-a"}, {"name": "dynamic-b"}]`},
|
||||
}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
require.NoError(t, err)
|
||||
expected := []*repoclient.ParameterAnnouncement{
|
||||
{Name: "dynamic-a"},
|
||||
{Name: "dynamic-b"},
|
||||
{Name: "static-a"},
|
||||
{Name: "static-b"},
|
||||
}
|
||||
assert.Equal(t, expected, res.ParameterAnnouncements)
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_invalid_json(t *testing.T) {
|
||||
command := Command{
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[`},
|
||||
}
|
||||
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unexpected end of JSON input")
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_bad_command(t *testing.T) {
|
||||
command := Command{
|
||||
Command: []string{"exit"},
|
||||
Args: []string{"1"},
|
||||
}
|
||||
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "error executing dynamic parameter output command")
|
||||
}
|
||||
|
||||
func Test_getTempDirMustCleanup(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
|
||||
// Induce a directory create error to verify error handling.
|
||||
err := os.Chmod(tempDir, 0000)
|
||||
require.NoError(t, err)
|
||||
_, _, err = getTempDirMustCleanup(path.Join(tempDir, "test"))
|
||||
assert.ErrorContains(t, err, "error creating temp dir")
|
||||
|
||||
err = os.Chmod(tempDir, 0700)
|
||||
require.NoError(t, err)
|
||||
workDir, cleanup, err := getTempDirMustCleanup(tempDir)
|
||||
require.NoError(t, err)
|
||||
require.DirExists(t, workDir)
|
||||
cleanup()
|
||||
assert.NoDirExists(t, workDir)
|
||||
}
|
||||
|
||||
func TestService_Init(t *testing.T) {
|
||||
// Set up a base directory containing a test directory and a test file.
|
||||
tempDir := t.TempDir()
|
||||
workDir := path.Join(tempDir, "workDir")
|
||||
err := os.MkdirAll(workDir, 0700)
|
||||
require.NoError(t, err)
|
||||
testfile := path.Join(workDir, "testfile")
|
||||
file, err := os.Create(testfile)
|
||||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make the base directory read-only so Init's cleanup fails.
|
||||
err = os.Chmod(tempDir, 0000)
|
||||
require.NoError(t, err)
|
||||
s := NewService(CMPServerInitConstants{PluginConfig: PluginConfig{}})
|
||||
err = s.Init(workDir)
|
||||
assert.ErrorContains(t, err, "error removing workdir", "Init must throw an error if it can't remove the work directory")
|
||||
|
||||
// Make the base directory writable so Init's cleanup succeeds.
|
||||
err = os.Chmod(tempDir, 0700)
|
||||
require.NoError(t, err)
|
||||
err = s.Init(workDir)
|
||||
assert.NoError(t, err)
|
||||
assert.DirExists(t, workDir)
|
||||
assert.NoFileExists(t, testfile)
|
||||
}
|
||||
|
||||
func TestEnviron(t *testing.T) {
|
||||
t.Run("empty environ", func(t *testing.T) {
|
||||
env := environ([]*apiclient.EnvEntry{})
|
||||
assert.Nil(t, env)
|
||||
})
|
||||
t.Run("env vars with empty names or values", func(t *testing.T) {
|
||||
env := environ([]*apiclient.EnvEntry{
|
||||
{Value: "test"},
|
||||
{Name: "test"},
|
||||
})
|
||||
assert.Nil(t, env)
|
||||
})
|
||||
t.Run("proper env vars", func(t *testing.T) {
|
||||
env := environ([]*apiclient.EnvEntry{
|
||||
{Name: "name1", Value: "value1"},
|
||||
{Name: "name2", Value: "value2"},
|
||||
})
|
||||
assert.Equal(t, []string{"name1=value1", "name2=value2"}, env)
|
||||
})
|
||||
}
|
||||
|
||||
type MockGenerateManifestStream struct {
|
||||
metadataSent bool
|
||||
fileSent bool
|
||||
metadataRequest *apiclient.AppStreamRequest
|
||||
fileRequest *apiclient.AppStreamRequest
|
||||
response *apiclient.ManifestResponse
|
||||
}
|
||||
|
||||
func NewMockGenerateManifestStream(repoPath, appPath string, env []string) (*MockGenerateManifestStream, error) {
|
||||
tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, appPath, env, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tgzstream.CloseAndDelete(tgz)
|
||||
|
||||
tgzBuffer := bytes.NewBuffer(nil)
|
||||
_, err = io.Copy(tgzBuffer, tgz)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err)
|
||||
}
|
||||
|
||||
return &MockGenerateManifestStream{
|
||||
metadataRequest: mr,
|
||||
fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockGenerateManifestStream) SendAndClose(response *apiclient.ManifestResponse) error {
|
||||
m.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockGenerateManifestStream) Recv() (*apiclient.AppStreamRequest, error) {
|
||||
if !m.metadataSent {
|
||||
m.metadataSent = true
|
||||
return m.metadataRequest, nil
|
||||
}
|
||||
|
||||
if !m.fileSent {
|
||||
m.fileSent = true
|
||||
return m.fileRequest, nil
|
||||
}
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
func (m *MockGenerateManifestStream) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func TestService_GenerateManifest(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("successful generate", func(t *testing.T) {
|
||||
s, err := NewMockGenerateManifestStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
err = service.generateManifestGeneric(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
assert.Equal(t, []string{"{\"apiVersion\":\"v1\",\"data\":{\"foo\":\"bar\"},\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"my-map\"}}"}, s.response.Manifests)
|
||||
})
|
||||
|
||||
t.Run("out-of-bounds app path", func(t *testing.T) {
|
||||
s, err := NewMockGenerateManifestStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
// set a malicious app path on the metadata
|
||||
s.metadataRequest.Request.(*apiclient.AppStreamRequest_Metadata).Metadata.AppRelPath = "../out-of-bounds"
|
||||
err = service.generateManifestGeneric(s)
|
||||
require.ErrorContains(t, err, "illegal appPath")
|
||||
assert.Nil(t, s.response)
|
||||
})
|
||||
}
|
||||
|
||||
type MockMatchRepositoryStream struct {
|
||||
metadataSent bool
|
||||
fileSent bool
|
||||
metadataRequest *apiclient.AppStreamRequest
|
||||
fileRequest *apiclient.AppStreamRequest
|
||||
response *apiclient.RepositoryResponse
|
||||
}
|
||||
|
||||
func NewMockMatchRepositoryStream(repoPath, appPath string, env []string) (*MockMatchRepositoryStream, error) {
|
||||
tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, appPath, env, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tgzstream.CloseAndDelete(tgz)
|
||||
|
||||
tgzBuffer := bytes.NewBuffer(nil)
|
||||
_, err = io.Copy(tgzBuffer, tgz)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err)
|
||||
}
|
||||
|
||||
return &MockMatchRepositoryStream{
|
||||
metadataRequest: mr,
|
||||
fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockMatchRepositoryStream) SendAndClose(response *apiclient.RepositoryResponse) error {
|
||||
m.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockMatchRepositoryStream) Recv() (*apiclient.AppStreamRequest, error) {
|
||||
if !m.metadataSent {
|
||||
m.metadataSent = true
|
||||
return m.metadataRequest, nil
|
||||
}
|
||||
|
||||
if !m.fileSent {
|
||||
m.fileSent = true
|
||||
return m.fileRequest, nil
|
||||
}
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
func (m *MockMatchRepositoryStream) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func TestService_MatchRepository(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("supported app", func(t *testing.T) {
|
||||
s, err := NewMockMatchRepositoryStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
err = service.matchRepositoryGeneric(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
assert.True(t, s.response.IsSupported)
|
||||
})
|
||||
|
||||
t.Run("unsupported app", func(t *testing.T) {
|
||||
s, err := NewMockMatchRepositoryStream("./testdata/ksonnet", "./testdata/ksonnet", nil)
|
||||
require.NoError(t, err)
|
||||
err = service.matchRepositoryGeneric(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
assert.False(t, s.response.IsSupported)
|
||||
})
|
||||
}
|
||||
|
||||
type MockParametersAnnouncementStream struct {
|
||||
metadataSent bool
|
||||
fileSent bool
|
||||
metadataRequest *apiclient.AppStreamRequest
|
||||
fileRequest *apiclient.AppStreamRequest
|
||||
response *apiclient.ParametersAnnouncementResponse
|
||||
}
|
||||
|
||||
func NewMockParametersAnnouncementStream(repoPath, appPath string, env []string) (*MockParametersAnnouncementStream, error) {
|
||||
tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, appPath, env, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tgzstream.CloseAndDelete(tgz)
|
||||
|
||||
tgzBuffer := bytes.NewBuffer(nil)
|
||||
_, err = io.Copy(tgzBuffer, tgz)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err)
|
||||
}
|
||||
|
||||
return &MockParametersAnnouncementStream{
|
||||
metadataRequest: mr,
|
||||
fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SendAndClose(response *apiclient.ParametersAnnouncementResponse) error {
|
||||
m.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) Recv() (*apiclient.AppStreamRequest, error) {
|
||||
if !m.metadataSent {
|
||||
m.metadataSent = true
|
||||
return m.metadataRequest, nil
|
||||
}
|
||||
|
||||
if !m.fileSent {
|
||||
m.fileSent = true
|
||||
return m.fileRequest, nil
|
||||
}
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SetHeader(metadata.MD) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SendHeader(metadata.MD) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SetTrailer(metadata.MD) {}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SendMsg(interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) RecvMsg(interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestService_GetParametersAnnouncement(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("successful response", func(t *testing.T) {
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
err = service.GetParametersAnnouncement(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
require.Len(t, s.response.ParameterAnnouncements, 1)
|
||||
assert.Equal(t, repoclient.ParameterAnnouncement{Name: "test-param", String_: "test-value"}, *s.response.ParameterAnnouncements[0])
|
||||
})
|
||||
t.Run("out of bounds app", func(t *testing.T) {
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
// set a malicious app path on the metadata
|
||||
s.metadataRequest.Request.(*apiclient.AppStreamRequest_Metadata).Metadata.AppRelPath = "../out-of-bounds"
|
||||
err = service.GetParametersAnnouncement(s)
|
||||
require.ErrorContains(t, err, "illegal appPath")
|
||||
require.Nil(t, s.response)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_getCommandArgsToLog(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
args []string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "no spaces",
|
||||
args: []string{"sh", "-c", "cat"},
|
||||
expected: "sh -c cat",
|
||||
},
|
||||
{
|
||||
name: "spaces",
|
||||
args: []string{"sh", "-c", `echo "hello world"`},
|
||||
expected: `sh -c "echo \"hello world\""`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tcc := tc
|
||||
t.Run(tcc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.Equal(t, tcc.expected, getCommandArgsToLog(exec.Command(tcc.args[0], tcc.args[1:]...)))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,8 +7,12 @@ spec:
|
||||
init:
|
||||
command: [kustomize, version]
|
||||
generate:
|
||||
command: [sh, -c, "cd testdata/kustomize && kustomize build"]
|
||||
command: [sh, -c, "kustomize build"]
|
||||
discover:
|
||||
find:
|
||||
command: [sh, -c, find . -name kustomization.yaml]
|
||||
glob: "**/*/kustomization.yaml"
|
||||
glob: "**/kustomization.yaml"
|
||||
parameters:
|
||||
static:
|
||||
- name: test-param
|
||||
string: test-value
|
||||
|
||||
@@ -108,7 +108,7 @@ func (a *ArgoCDCMPServer) CreateGRPC() (*grpc.Server, error) {
|
||||
return true, nil
|
||||
}))
|
||||
pluginService := plugin.NewService(a.initConstants)
|
||||
err := pluginService.Init()
|
||||
err := pluginService.Init(common.GetCMPWorkDir())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing plugin service: %s", err)
|
||||
}
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Default service addresses and URLs of Argo CD internal services
|
||||
@@ -222,13 +225,7 @@ const (
|
||||
// DefaultCMPWorkDirName defines the work directory name used by the cmp-server
|
||||
DefaultCMPWorkDirName = "_cmp_server"
|
||||
|
||||
ConfigMapPluginDeprecationWarning = "argocd-cm plugins are deprecated, and support will be removed in v2.6. Upgrade your plugin to be installed via sidecar. https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/"
|
||||
|
||||
ConfigMapPluginCLIDeprecationWarning = "spec.plugin.name is set, which means this Application uses a plugin installed in the " +
|
||||
"argocd-cm ConfigMap. Installing plugins via that ConfigMap is deprecated in Argo CD v2.5. " +
|
||||
"Starting in Argo CD v2.6, this Application will fail to sync. Contact your Argo CD admin " +
|
||||
"to make sure an upgrade plan is in place. More info: " +
|
||||
"https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.4-2.5/"
|
||||
ConfigMapPluginDeprecationWarning = "argocd-cm plugins are deprecated, and support will be removed in v2.7. Upgrade your plugin to be installed via sidecar. https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -316,3 +313,10 @@ const (
|
||||
SecurityMedium = 2 // Could indicate malicious events, but has a high likelihood of being user/system error (i.e. access denied)
|
||||
SecurityLow = 1 // Unexceptional entries (i.e. successful access logs)
|
||||
)
|
||||
|
||||
// Common error messages
|
||||
const TokenVerificationError = "failed to verify the token"
|
||||
|
||||
var TokenVerificationErr = errors.New(TokenVerificationError)
|
||||
|
||||
var PermissionDeniedAPIError = status.Error(codes.PermissionDenied, "permission denied")
|
||||
|
||||
@@ -335,7 +335,7 @@ func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]b
|
||||
}
|
||||
|
||||
if !ctrl.canProcessApp(obj) {
|
||||
// Don't force refresh app if app belongs to a different controller shard
|
||||
// Don't force refresh app if app belongs to a different controller shard or is outside the allowed namespaces.
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -415,11 +415,11 @@ func isKnownOrphanedResourceExclusion(key kube.ResourceKey, proj *appv1.AppProje
|
||||
|
||||
func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managedResources []*appv1.ResourceDiff) (*appv1.ApplicationTree, error) {
|
||||
nodes := make([]appv1.ResourceNode, 0)
|
||||
|
||||
proj, err := ctrl.getAppProj(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
orphanedNodesMap := make(map[kube.ResourceKey]appv1.ResourceNode)
|
||||
warnOrphaned := true
|
||||
if proj.Spec.OrphanedResources != nil {
|
||||
@@ -429,7 +429,6 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
}
|
||||
warnOrphaned = proj.Spec.OrphanedResources.IsWarn()
|
||||
}
|
||||
|
||||
for i := range managedResources {
|
||||
managedResource := managedResources[i]
|
||||
delete(orphanedNodesMap, kube.NewResourceKey(managedResource.Group, managedResource.Kind, managedResource.Namespace, managedResource.Name))
|
||||
@@ -751,6 +750,7 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
|
||||
// needs to be the qualified name of the application, i.e. <namespace>/<name>.
|
||||
func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith *CompareWith, after *time.Duration) {
|
||||
key := ctrl.toAppKey(appName)
|
||||
|
||||
if compareWith != nil && after != nil {
|
||||
ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, compareWith), *after)
|
||||
} else {
|
||||
@@ -1313,7 +1313,6 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
}
|
||||
ctrl.appRefreshQueue.Done(appKey)
|
||||
}()
|
||||
|
||||
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
|
||||
@@ -1334,9 +1333,9 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
if !needRefresh {
|
||||
return
|
||||
}
|
||||
|
||||
app := origApp.DeepCopy()
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
reconcileDuration := time.Since(startTime)
|
||||
@@ -1389,15 +1388,38 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
localManifests = opState.Operation.Sync.Manifests
|
||||
}
|
||||
|
||||
revision := app.Spec.Source.TargetRevision
|
||||
if comparisonLevel == CompareWithRecent {
|
||||
revision = app.Status.Sync.Revision
|
||||
}
|
||||
revisions := make([]string, 0)
|
||||
sources := make([]appv1.ApplicationSource, 0)
|
||||
|
||||
hasMultipleSources := app.Spec.HasMultipleSources()
|
||||
|
||||
// If we have multiple sources, we use all the sources under the `sources` field and ignore the source under the `source` field.
|
||||
// Otherwise we use the source under the `source` field.
|
||||
if hasMultipleSources {
|
||||
for _, source := range app.Spec.Sources {
|
||||
// We do not perform any filtering of duplicate sources.
|
||||
// Argo CD will apply and update the resources generated from the sources automatically
|
||||
// based on the order in which manifests were generated
|
||||
sources = append(sources, source)
|
||||
revisions = append(revisions, source.TargetRevision)
|
||||
}
|
||||
if comparisonLevel == CompareWithRecent {
|
||||
revisions = app.Status.Sync.Revisions
|
||||
}
|
||||
} else {
|
||||
revision := app.Spec.GetSource().TargetRevision
|
||||
if comparisonLevel == CompareWithRecent {
|
||||
revision = app.Status.Sync.Revision
|
||||
}
|
||||
revisions = append(revisions, revision)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
}
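
The branch above is where multi-source support enters the refresh path: a multi-source app contributes one source and one revision per entry under spec.sources, while a single-source app is wrapped into one-element slices, and CompareWithRecent swaps in the revisions recorded by the last sync. A standalone sketch of that selection, using simplified stand-in types instead of the real v1alpha1 structs.

package sketch

// source and appSpec are simplified stand-ins for the v1alpha1 Application types.
type source struct {
	TargetRevision string
}

type appSpec struct {
	Source  *source
	Sources []source
}

func (s appSpec) hasMultipleSources() bool { return len(s.Sources) > 0 }

// selectSourcesAndRevisions mirrors the branch above.
func selectSourcesAndRevisions(spec appSpec, compareWithRecent bool, syncedRevision string, syncedRevisions []string) ([]source, []string) {
	if spec.hasMultipleSources() {
		sources := append([]source(nil), spec.Sources...)
		revisions := make([]string, 0, len(sources))
		for _, src := range sources {
			revisions = append(revisions, src.TargetRevision)
		}
		if compareWithRecent {
			// Compare against the revisions recorded in the last sync status.
			revisions = syncedRevisions
		}
		return sources, revisions
	}
	revision := spec.Source.TargetRevision
	if compareWithRecent {
		revision = syncedRevision
	}
	return []source{*spec.Source}, []string{revision}
}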
|
||||
now := metav1.Now()
|
||||
compareResult := ctrl.appStateManager.CompareAppState(app, project, revision, app.Spec.Source,
|
||||
|
||||
compareResult := ctrl.appStateManager.CompareAppState(app, project, revisions, sources,
|
||||
refreshType == appv1.RefreshTypeHard,
|
||||
comparisonLevel == CompareWithLatestForceResolve, localManifests)
|
||||
comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources)
|
||||
|
||||
for k, v := range compareResult.timings {
|
||||
logCtx = logCtx.WithField(k, v.Milliseconds())
|
||||
}
|
||||
@@ -1438,6 +1460,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
return resourceStatusKey(app.Status.Resources[i]) < resourceStatusKey(app.Status.Resources[j])
|
||||
})
|
||||
app.Status.SourceType = compareResult.appSourceType
|
||||
app.Status.SourceTypes = compareResult.appSourceTypes
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
return
|
||||
}
|
||||
@@ -1446,6 +1469,13 @@ func resourceStatusKey(res appv1.ResourceStatus) string {
|
||||
return strings.Join([]string{res.Group, res.Kind, res.Namespace, res.Name}, "/")
|
||||
}
|
||||
|
||||
func currentSourceEqualsSyncedSource(app *appv1.Application) bool {
|
||||
if app.Spec.HasMultipleSources() {
|
||||
return app.Spec.Sources.Equals(app.Status.Sync.ComparedTo.Sources)
|
||||
}
|
||||
return app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source)
|
||||
}
|
||||
|
||||
// needRefreshAppStatus answers if application status needs to be refreshed.
|
||||
// Returns true if the application has never been compared, has changed, or the comparison result has expired.
|
||||
// Additionally returns whether full refresh was requested or not.
|
||||
@@ -1463,27 +1493,32 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
|
||||
// user requested app refresh.
|
||||
refreshType = requestedType
|
||||
reason = fmt.Sprintf("%s refresh requested", refreshType)
|
||||
} else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) {
|
||||
reason = "spec.source differs"
|
||||
compareWith = CompareWithLatestForceResolve
|
||||
} else if hardExpired || softExpired {
|
||||
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
|
||||
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
|
||||
//TODO: find existing Golang bug or create a new one
|
||||
reconciledAtStr := "never"
|
||||
if app.Status.ReconciledAt != nil {
|
||||
reconciledAtStr = app.Status.ReconciledAt.String()
|
||||
} else {
|
||||
if !currentSourceEqualsSyncedSource(app) {
|
||||
reason = "spec.source differs"
|
||||
compareWith = CompareWithLatestForceResolve
|
||||
if app.Spec.HasMultipleSources() {
|
||||
reason = "at least one of the spec.sources differs"
|
||||
}
|
||||
} else if hardExpired || softExpired {
|
||||
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
|
||||
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
|
||||
//TODO: find existing Golang bug or create a new one
|
||||
reconciledAtStr := "never"
|
||||
if app.Status.ReconciledAt != nil {
|
||||
reconciledAtStr = app.Status.ReconciledAt.String()
|
||||
}
|
||||
reason = fmt.Sprintf("comparison expired, requesting refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusRefreshTimeout)
|
||||
if hardExpired {
|
||||
reason = fmt.Sprintf("comparison expired, requesting hard refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusHardRefreshTimeout)
|
||||
refreshType = appv1.RefreshTypeHard
|
||||
}
|
||||
} else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) {
|
||||
reason = "spec.destination differs"
|
||||
} else if requested, level := ctrl.isRefreshRequested(app.QualifiedName()); requested {
|
||||
compareWith = level
|
||||
reason = "controller refresh requested"
|
||||
}
|
||||
reason = fmt.Sprintf("comparison expired, requesting refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusRefreshTimeout)
|
||||
if hardExpired {
|
||||
reason = fmt.Sprintf("comparison expired, requesting hard refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusHardRefreshTimeout)
|
||||
refreshType = appv1.RefreshTypeHard
|
||||
}
|
||||
} else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) {
|
||||
reason = "spec.destination differs"
|
||||
} else if requested, level := ctrl.isRefreshRequested(app.QualifiedName()); requested {
|
||||
compareWith = level
|
||||
reason = "controller refresh requested"
|
||||
}
|
||||
|
||||
if reason != "" {
|
||||
@@ -1497,17 +1532,7 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
|
||||
errorConditions := make([]appv1.ApplicationCondition, 0)
|
||||
proj, err := ctrl.getAppProj(app)
|
||||
if err != nil {
|
||||
if apierr.IsNotFound(err) {
|
||||
errorConditions = append(errorConditions, appv1.ApplicationCondition{
|
||||
Type: appv1.ApplicationConditionInvalidSpecError,
|
||||
Message: fmt.Sprintf("Application referencing project %s which does not exist", app.Spec.Project),
|
||||
})
|
||||
} else {
|
||||
errorConditions = append(errorConditions, appv1.ApplicationCondition{
|
||||
Type: appv1.ApplicationConditionUnknownError,
|
||||
Message: err.Error(),
|
||||
})
|
||||
}
|
||||
errorConditions = append(errorConditions, ctrl.projectErrorToCondition(err, app))
|
||||
} else {
|
||||
specConditions, err := argo.ValidatePermissions(context.Background(), &app.Spec, proj, ctrl.db)
|
||||
if err != nil {
|
||||
@@ -1530,7 +1555,9 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
|
||||
func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Application) {
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
app.Spec = *argo.NormalizeApplicationSpec(&app.Spec)
|
||||
|
||||
patch, modified, err := diff.CreateTwoWayMergePatch(orig, app, appv1.Application{})
|
||||
|
||||
if err != nil {
|
||||
logCtx.Errorf("error constructing app spec patch: %v", err)
|
||||
} else if modified {
|
||||
@@ -1574,7 +1601,6 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
logCtx.Infof("No status changes. Skipping patch")
|
||||
return
|
||||
}
|
||||
logCtx.Debugf("patch: %s", string(patch))
|
||||
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace)
|
||||
_, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
@@ -1590,6 +1616,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
return nil
|
||||
}
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
|
||||
if app.Operation != nil {
|
||||
logCtx.Infof("Skipping auto-sync: another operation is in progress")
|
||||
return nil
|
||||
@@ -1621,13 +1648,15 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
}
|
||||
|
||||
desiredCommitSHA := syncStatus.Revision
|
||||
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA)
|
||||
desiredCommitSHAsMS := syncStatus.Revisions
|
||||
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources())
|
||||
selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
|
||||
op := appv1.Operation{
|
||||
Sync: &appv1.SyncOperation{
|
||||
Revision: desiredCommitSHA,
|
||||
Prune: app.Spec.SyncPolicy.Automated.Prune,
|
||||
SyncOptions: app.Spec.SyncPolicy.SyncOptions,
|
||||
Revisions: desiredCommitSHAsMS,
|
||||
},
|
||||
InitiatedBy: appv1.OperationInitiator{Automated: true},
|
||||
Retry: appv1.RetryStrategy{Limit: 5},
|
||||
@@ -1679,7 +1708,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
|
||||
}
|
||||
}
|
||||
|
||||
appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
_, err := argo.SetAppOperation(appIf, app.Name, &op)
|
||||
if err != nil {
|
||||
@@ -1694,20 +1722,41 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
|
||||
// alreadyAttemptedSync returns whether or not the most recent sync was performed against the
|
||||
// commitSHA and with the same app source config which are currently set in the app
|
||||
func alreadyAttemptedSync(app *appv1.Application, commitSHA string) (bool, synccommon.OperationPhase) {
|
||||
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool) (bool, synccommon.OperationPhase) {
|
||||
if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
|
||||
return false, ""
|
||||
}
|
||||
if app.Status.OperationState.SyncResult.Revision != commitSHA {
|
||||
return false, ""
|
||||
if hasMultipleSources {
|
||||
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
|
||||
return false, ""
|
||||
}
|
||||
} else {
|
||||
if app.Status.OperationState.SyncResult.Revision != commitSHA {
|
||||
return false, ""
|
||||
}
|
||||
}
|
||||
|
||||
if hasMultipleSources {
|
||||
// Ignore differences in target revision, since we already just verified commitSHAs are equal,
|
||||
// and we do not want to trigger auto-sync due to things like HEAD != master
|
||||
specSources := app.Spec.Sources.DeepCopy()
|
||||
syncSources := app.Status.OperationState.SyncResult.Sources.DeepCopy()
|
||||
for _, source := range specSources {
|
||||
source.TargetRevision = ""
|
||||
}
|
||||
for _, source := range syncSources {
|
||||
source.TargetRevision = ""
|
||||
}
|
||||
return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.Phase
|
||||
} else {
|
||||
// Ignore differences in target revision, since we already just verified commitSHAs are equal,
|
||||
// and we do not want to trigger auto-sync due to things like HEAD != master
|
||||
specSource := app.Spec.Source.DeepCopy()
|
||||
specSource.TargetRevision = ""
|
||||
syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy()
|
||||
syncResSource.TargetRevision = ""
|
||||
return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase
|
||||
}
|
||||
// Ignore differences in target revision, since we already just verified commitSHAs are equal,
|
||||
// and we do not want to trigger auto-sync due to things like HEAD != master
|
||||
specSource := app.Spec.Source.DeepCopy()
|
||||
specSource.TargetRevision = ""
|
||||
syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy()
|
||||
syncResSource.TargetRevision = ""
|
||||
return reflect.DeepEqual(app.Spec.Source, app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase
|
||||
}
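
alreadyAttemptedSync deliberately blanks out TargetRevision before comparing the spec source(s) with the last SyncResult, so that a moving ref such as HEAD versus master does not retrigger auto-sync once the resolved commit SHAs already match. A hedged sketch of that comparison idea, using a simplified stand-in struct rather than the real ApplicationSource type.

package main

import (
	"fmt"
	"reflect"
)

// applicationSource is a simplified stand-in for v1alpha1.ApplicationSource.
type applicationSource struct {
	RepoURL        string
	Path           string
	TargetRevision string
}

// sameSourceIgnoringRevision works on copies (pass-by-value), blanks the field
// we want to ignore, then deep-compares the rest.
func sameSourceIgnoringRevision(spec, synced applicationSource) bool {
	spec.TargetRevision = ""
	synced.TargetRevision = ""
	return reflect.DeepEqual(spec, synced)
}

func main() {
	spec := applicationSource{RepoURL: "https://example.com/repo.git", Path: "app", TargetRevision: "HEAD"}
	synced := applicationSource{RepoURL: "https://example.com/repo.git", Path: "app", TargetRevision: "master"}
	fmt.Println(sameSourceIgnoringRevision(spec, synced)) // true
}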
|
||||
|
||||
func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool, time.Duration) {
|
||||
@@ -1729,6 +1778,13 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Only process given app if it exists in a watched namespace, or in the
|
||||
// control plane's namespace.
|
||||
if app.Namespace != ctrl.namespace && !glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) {
|
||||
return false
|
||||
}
|
||||
|
||||
if ctrl.clusterFilter != nil {
|
||||
cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
|
||||
if err != nil {
|
||||
@@ -1737,12 +1793,6 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
|
||||
return ctrl.clusterFilter(cluster)
|
||||
}
|
||||
|
||||
// Only process given app if it exists in a watched namespace, or in the
|
||||
// control plane's namespace.
|
||||
if app.Namespace != ctrl.namespace && !glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -1798,7 +1848,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
// If the application is not allowed to use the project,
|
||||
// log an error.
|
||||
if _, err := ctrl.getAppProj(app); err != nil {
|
||||
ctrl.setAppCondition(app, appv1.ApplicationCondition{Type: appv1.ApplicationConditionUnknownError, Message: err.Error()})
|
||||
ctrl.setAppCondition(app, ctrl.projectErrorToCondition(err, app))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1869,6 +1919,19 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
return informer, lister
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) projectErrorToCondition(err error, app *appv1.Application) appv1.ApplicationCondition {
|
||||
var condition appv1.ApplicationCondition
|
||||
if apierr.IsNotFound(err) {
|
||||
condition = appv1.ApplicationCondition{
|
||||
Type: appv1.ApplicationConditionInvalidSpecError,
|
||||
Message: fmt.Sprintf("Application referencing project %s which does not exist", app.Spec.Project),
|
||||
}
|
||||
} else {
|
||||
condition = appv1.ApplicationCondition{Type: appv1.ApplicationConditionUnknownError, Message: err.Error()}
|
||||
}
|
||||
return condition
|
||||
}
|
||||
|
||||
func (ctrl *ApplicationController) RegisterClusterSecretUpdater(ctx context.Context) {
|
||||
updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterFilter, ctrl.getAppProj, ctrl.namespace)
|
||||
go updater.Run(ctx)
|
||||
|
||||
@@ -209,6 +209,55 @@ status:
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps.git
|
||||
`
|
||||
|
||||
var fakeMultiSourceApp = `
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
uid: "123"
|
||||
name: my-app
|
||||
namespace: ` + test.FakeArgoCDNamespace + `
|
||||
spec:
|
||||
destination:
|
||||
namespace: ` + test.FakeDestNamespace + `
|
||||
server: https://localhost:6443
|
||||
project: default
|
||||
sources:
|
||||
- path: some/path
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps.git
|
||||
- path: some/other/path
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps-fake.git
|
||||
syncPolicy:
|
||||
automated: {}
|
||||
status:
|
||||
operationState:
|
||||
finishedAt: 2018-09-21T23:50:29Z
|
||||
message: successfully synced
|
||||
operation:
|
||||
sync:
|
||||
revisions:
|
||||
- HEAD
|
||||
- HEAD
|
||||
phase: Succeeded
|
||||
startedAt: 2018-09-21T23:50:25Z
|
||||
syncResult:
|
||||
resources:
|
||||
- kind: RoleBinding
|
||||
message: |-
|
||||
rolebinding.rbac.authorization.k8s.io/always-outofsync reconciled
|
||||
rolebinding.rbac.authorization.k8s.io/always-outofsync configured
|
||||
name: always-outofsync
|
||||
namespace: default
|
||||
status: Synced
|
||||
revisions:
|
||||
- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
|
||||
- bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
|
||||
sources:
|
||||
- path: some/path
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps.git
|
||||
- path: some/other/path
|
||||
repoURL: https://github.com/argoproj/argocd-example-apps-fake.git
|
||||
`
|
||||
|
||||
var fakeAppWithDestName = `
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
@@ -263,6 +312,10 @@ func newFakeApp() *argoappv1.Application {
|
||||
return createFakeApp(fakeApp)
|
||||
}
|
||||
|
||||
func newFakeMultiSourceApp() *argoappv1.Application {
|
||||
return createFakeApp(fakeMultiSourceApp)
|
||||
}
|
||||
|
||||
func newFakeAppWithDestMismatch() *argoappv1.Application {
|
||||
return createFakeApp(fakeAppWithDestMismatch)
|
||||
}
|
||||
@@ -857,101 +910,133 @@ func TestSetOperationStateOnDeletedApp(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
|
||||
|
||||
app := newFakeApp()
|
||||
now := metav1.Now()
|
||||
app.Status.ReconciledAt = &now
|
||||
app.Status.Sync = argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Source: app.Spec.Source,
|
||||
Destination: app.Spec.Destination,
|
||||
testCases := []struct {
|
||||
name string
|
||||
app *argoappv1.Application
|
||||
}{
|
||||
{
|
||||
name: "single-source app",
|
||||
app: newFakeApp(),
|
||||
},
|
||||
{
|
||||
name: "multi-source app",
|
||||
app: newFakeMultiSourceApp(),
|
||||
},
|
||||
}
|
||||
|
||||
// no need to refresh just reconciled application
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
|
||||
app := tc.app
|
||||
now := metav1.Now()
|
||||
app.Status.ReconciledAt = &now
|
||||
|
||||
// refresh app using the 'deepest' requested comparison level
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
|
||||
app.Status.Sync = argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
},
|
||||
}
|
||||
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithRecent, compareWith)
|
||||
if app.Spec.HasMultipleSources() {
|
||||
app.Status.Sync.ComparedTo.Sources = app.Spec.Sources
|
||||
} else {
|
||||
app.Status.Sync.ComparedTo.Source = app.Spec.GetSource()
|
||||
}
|
||||
|
||||
// refresh application which status is not reconciled using latest commit
|
||||
app.Status.Sync = argoappv1.SyncStatus{Status: argoappv1.SyncStatusCodeUnknown}
|
||||
// no need to refresh just reconciled application
|
||||
needRefresh, _, _ := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.False(t, needRefresh)
|
||||
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
// refresh app using the 'deepest' requested comparison level
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
|
||||
|
||||
{
|
||||
// refresh app using the 'latest' level if comparison expired
|
||||
app := app.DeepCopy()
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Minute, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
}
|
||||
needRefresh, refreshType, compareWith := ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithRecent, compareWith)
|
||||
|
||||
{
|
||||
// refresh app using the 'latest' level if comparison expired for hard refresh
|
||||
app := app.DeepCopy()
|
||||
app.Status.Sync = argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Source: app.Spec.Source,
|
||||
Destination: app.Spec.Destination,
|
||||
},
|
||||
}
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 2*time.Hour, 1*time.Minute)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
|
||||
assert.Equal(t, CompareWithLatest, compareWith)
|
||||
}
|
||||
// refresh application which status is not reconciled using latest commit
|
||||
app.Status.Sync = argoappv1.SyncStatus{Status: argoappv1.SyncStatusCodeUnknown}
|
||||
|
||||
{
|
||||
app := app.DeepCopy()
|
||||
// execute hard refresh if app has refresh annotation
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
app.Annotations = map[string]string{
|
||||
v1alpha1.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard),
|
||||
}
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
}
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
|
||||
{
|
||||
app := app.DeepCopy()
|
||||
// ensure that CompareWithLatest level is used if application source has changed
|
||||
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
|
||||
// sample app source change
|
||||
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
|
||||
Parameters: []argoappv1.HelmParameter{{
|
||||
Name: "foo",
|
||||
Value: "bar",
|
||||
}},
|
||||
}
|
||||
t.Run("refresh app using the 'latest' level if comparison expired", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Minute, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
})
|
||||
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
t.Run("refresh app using the 'latest' level if comparison expired for hard refresh", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
app.Status.Sync = argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
},
|
||||
}
|
||||
if app.Spec.HasMultipleSources() {
|
||||
app.Status.Sync.ComparedTo.Sources = app.Spec.Sources
|
||||
} else {
|
||||
app.Status.Sync.ComparedTo.Source = app.Spec.GetSource()
|
||||
}
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 2*time.Hour, 1*time.Minute)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
|
||||
assert.Equal(t, CompareWithLatest, compareWith)
|
||||
})
|
||||
|
||||
t.Run("execute hard refresh if app has refresh annotation", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
reconciledAt := metav1.NewTime(time.Now().UTC().Add(-1 * time.Hour))
|
||||
app.Status.ReconciledAt = &reconciledAt
|
||||
app.Annotations = map[string]string{
|
||||
v1alpha1.AnnotationKeyRefresh: string(argoappv1.RefreshTypeHard),
|
||||
}
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeHard, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
})
|
||||
|
||||
t.Run("ensure that CompareWithLatest level is used if application source has changed", func(t *testing.T) {
|
||||
app := app.DeepCopy()
|
||||
ctrl.requestAppRefresh(app.Name, ComparisonWithNothing.Pointer(), nil)
|
||||
// sample app source change
|
||||
if app.Spec.HasMultipleSources() {
|
||||
app.Spec.Sources[0].Helm = &argoappv1.ApplicationSourceHelm{
|
||||
Parameters: []argoappv1.HelmParameter{{
|
||||
Name: "foo",
|
||||
Value: "bar",
|
||||
}},
|
||||
}
|
||||
} else {
|
||||
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{
|
||||
Parameters: []argoappv1.HelmParameter{{
|
||||
Name: "foo",
|
||||
Value: "bar",
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
needRefresh, refreshType, compareWith = ctrl.needRefreshAppStatus(app, 1*time.Hour, 2*time.Hour)
|
||||
assert.True(t, needRefresh)
|
||||
assert.Equal(t, argoappv1.RefreshTypeNormal, refreshType)
|
||||
assert.Equal(t, CompareWithLatestForceResolve, compareWith)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1012,7 +1097,7 @@ func TestUpdateReconciledAt(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
reconciledAt := metav1.NewTime(time.Now().Add(-1 * time.Second))
|
||||
app.Status = argoappv1.ApplicationStatus{ReconciledAt: &reconciledAt}
|
||||
app.Status.Sync = argoappv1.SyncStatus{ComparedTo: argoappv1.ComparedTo{Source: app.Spec.Source, Destination: app.Spec.Destination}}
|
||||
app.Status.Sync = argoappv1.SyncStatus{ComparedTo: argoappv1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination}}
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
@@ -1068,6 +1153,34 @@ func TestUpdateReconciledAt(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestProjectErrorToCondition(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Project = "wrong project"
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
})
|
||||
key, _ := cache.MetaNamespaceKeyFunc(app)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
|
||||
ctrl.processAppRefreshQueueItem()
|
||||
|
||||
obj, ok, err := ctrl.appInformer.GetIndexer().GetByKey(key)
|
||||
assert.True(t, ok)
|
||||
assert.NoError(t, err)
|
||||
updatedApp := obj.(*argoappv1.Application)
|
||||
assert.Equal(t, argoappv1.ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[0].Type)
|
||||
assert.Equal(t, "Application referencing project wrong project which does not exist", updatedApp.Status.Conditions[0].Message)
|
||||
assert.Equal(t, argoappv1.ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[0].Type)
|
||||
}
|
||||
|
||||
func TestFinalizeProjectDeletion_HasApplications(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
proj := &argoappv1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}}
|
||||
@@ -1345,3 +1458,31 @@ func TestToAppKey(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_canProcessApp(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
|
||||
ctrl.applicationNamespaces = []string{"good"}
|
||||
t.Run("without cluster filter, good namespace", func(t *testing.T) {
|
||||
app.Namespace = "good"
|
||||
canProcess := ctrl.canProcessApp(app)
|
||||
assert.True(t, canProcess)
|
||||
})
|
||||
t.Run("without cluster filter, bad namespace", func(t *testing.T) {
|
||||
app.Namespace = "bad"
|
||||
canProcess := ctrl.canProcessApp(app)
|
||||
assert.False(t, canProcess)
|
||||
})
|
||||
t.Run("with cluster filter, good namespace", func(t *testing.T) {
|
||||
app.Namespace = "good"
|
||||
ctrl.clusterFilter = func(_ *argoappv1.Cluster) bool { return true }
|
||||
canProcess := ctrl.canProcessApp(app)
|
||||
assert.True(t, canProcess)
|
||||
})
|
||||
t.Run("with cluster filter, bad namespace", func(t *testing.T) {
|
||||
app.Namespace = "bad"
|
||||
ctrl.clusterFilter = func(_ *argoappv1.Cluster) bool { return true }
|
||||
canProcess := ctrl.canProcessApp(app)
|
||||
assert.False(t, canProcess)
|
||||
})
|
||||
}
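
Test_canProcessApp above exercises the new namespace gate: an Application is only processed when it lives in the controller's own namespace or matches one of the configured application namespaces. A self-contained sketch of that check, using the standard library's path.Match in place of the repo's internal util/glob helper.

package sketch

import "path"

// namespaceAllowed reports whether an app namespace is the controller's own
// namespace or matches one of the allowed (possibly glob) patterns.
func namespaceAllowed(controllerNS string, allowed []string, appNS string) bool {
	if appNS == controllerNS {
		return true
	}
	for _, pattern := range allowed {
		if ok, _ := path.Match(pattern, appNS); ok {
			return true
		}
	}
	return false
}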
|
||||
|
||||
controller/cache/cache.go (vendored, 24 changes)
@@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/controller/metrics"
|
||||
@@ -389,6 +390,25 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
|
||||
return nil, fmt.Errorf("controller is configured to ignore cluster %s", cluster.Server)
|
||||
}
|
||||
|
||||
resourceCustomLabels, err := c.settingsMgr.GetResourceCustomLabels()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting custom label: %w", err)
|
||||
}
|
||||
|
||||
clusterCacheConfig := cluster.RESTConfig()
|
||||
// Controller dynamically fetches all resource types available on the cluster
|
||||
// using a discovery API that may contain deprecated APIs.
|
||||
// This causes log flooding when managing a large number of clusters.
|
||||
// https://github.com/argoproj/argo-cd/issues/11973
|
||||
// However, we can safely suppress deprecation warnings
|
||||
// because we do not rely on resources with a particular API group or version.
|
||||
// https://kubernetes.io/blog/2020/09/03/warnings/#customize-client-handling
|
||||
//
|
||||
// Completely suppress warning logs only for log levels that are less than Debug.
|
||||
if log.GetLevel() < log.DebugLevel {
|
||||
clusterCacheConfig.WarningHandler = rest.NoWarnings{}
|
||||
}
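
The block above silences Kubernetes API deprecation warnings on the per-cluster REST config unless the controller runs at debug level or higher, which avoids log flooding when many clusters are managed. A hedged sketch of choosing a warning handler on a client-go rest.Config; the NewWarningWriter options are assumed from client-go's rest package.

package sketch

import (
	"os"

	"k8s.io/client-go/rest"
)

// configureWarnings either drops API server warnings entirely, as the
// controller does below debug level, or keeps them but deduplicates repeats.
func configureWarnings(cfg *rest.Config, verbose bool) {
	if !verbose {
		cfg.WarningHandler = rest.NoWarnings{}
		return
	}
	cfg.WarningHandler = rest.NewWarningWriter(os.Stderr, rest.WarningWriterOptions{Deduplicate: true})
}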
|
||||
|
||||
clusterCacheOpts := []clustercache.UpdateSettingsFunc{
|
||||
clustercache.SetListSemaphore(semaphore.NewWeighted(clusterCacheListSemaphoreSize)),
|
||||
clustercache.SetListPageSize(clusterCacheListPageSize),
|
||||
@@ -400,7 +420,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
|
||||
clustercache.SetClusterResources(cluster.ClusterResources),
|
||||
clustercache.SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (interface{}, bool) {
|
||||
res := &ResourceInfo{}
|
||||
populateNodeInfo(un, res)
|
||||
populateNodeInfo(un, res, resourceCustomLabels)
|
||||
c.lock.RLock()
|
||||
cacheSettings := c.cacheSettings
|
||||
c.lock.RUnlock()
|
||||
@@ -420,7 +440,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
|
||||
clustercache.SetRetryOptions(clusterCacheAttemptLimit, clusterCacheRetryUseBackoff, isRetryableError),
|
||||
}
|
||||
|
||||
clusterCache = clustercache.NewClusterCache(cluster.RESTConfig(), clusterCacheOpts...)
|
||||
clusterCache = clustercache.NewClusterCache(clusterCacheConfig, clusterCacheOpts...)
|
||||
|
||||
_ = clusterCache.OnResourceUpdated(func(newRes *clustercache.Resource, oldRes *clustercache.Resource, namespaceResources map[kube.ResourceKey]*clustercache.Resource) {
|
||||
toNotify := make(map[string]bool)
|
||||
|
||||
controller/cache/info.go (vendored, 11 changes)
@@ -19,12 +19,21 @@ import (
|
||||
"github.com/argoproj/argo-cd/v2/util/resource"
|
||||
)
|
||||
|
||||
func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) {
|
||||
func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLabels []string) {
|
||||
gvk := un.GroupVersionKind()
|
||||
revision := resource.GetRevision(un)
|
||||
if revision > 0 {
|
||||
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Revision", Value: fmt.Sprintf("Rev:%v", revision)})
|
||||
}
|
||||
if len(customLabels) > 0 {
|
||||
if labels := un.GetLabels(); labels != nil {
|
||||
for _, customLabel := range customLabels {
|
||||
if value, ok := labels[customLabel]; ok {
|
||||
res.Info = append(res.Info, v1alpha1.InfoItem{Name: customLabel, Value: value})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
switch gvk.Group {
|
||||
case "":
|
||||
switch gvk.Kind {
|
||||
|
||||
controller/cache/info_test.go (vendored, 76 changes)
@@ -271,7 +271,7 @@ func TestGetPodInfo(t *testing.T) {
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info)
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
@@ -302,7 +302,7 @@ status:
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(node, info)
|
||||
populateNodeInfo(node, info, []string{})
|
||||
assert.Equal(t, &NodeInfo{
|
||||
Name: "minikube",
|
||||
Capacity: v1.ResourceList{v1.ResourceMemory: resource.MustParse("6091320Ki"), v1.ResourceCPU: resource.MustParse("6")},
|
||||
@@ -312,7 +312,7 @@ status:
|
||||
|
||||
func TestGetServiceInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testService, info)
|
||||
populateNodeInfo(testService, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
|
||||
TargetLabels: map[string]string{"app": "guestbook"},
|
||||
@@ -322,7 +322,7 @@ func TestGetServiceInfo(t *testing.T) {
|
||||
|
||||
func TestGetLinkAnnotatedServiceInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testLinkAnnotatedService, info)
|
||||
populateNodeInfo(testLinkAnnotatedService, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
|
||||
TargetLabels: map[string]string{"app": "guestbook"},
|
||||
@@ -333,7 +333,7 @@ func TestGetLinkAnnotatedServiceInfo(t *testing.T) {
|
||||
|
||||
func TestGetIstioVirtualServiceInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testIstioVirtualService, info)
|
||||
populateNodeInfo(testIstioVirtualService, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
require.NotNil(t, info.NetworkingInfo)
|
||||
require.NotNil(t, info.NetworkingInfo.TargetRefs)
|
||||
@@ -363,7 +363,7 @@ func TestGetIngressInfo(t *testing.T) {
|
||||
}
|
||||
for _, tc := range tests {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(tc.Ingress, info)
|
||||
populateNodeInfo(tc.Ingress, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool {
|
||||
return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0
|
||||
@@ -388,7 +388,7 @@ func TestGetIngressInfo(t *testing.T) {
|
||||
|
||||
func TestGetLinkAnnotatedIngressInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testLinkAnnotatedIngress, info)
|
||||
populateNodeInfo(testLinkAnnotatedIngress, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool {
|
||||
return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0
|
||||
@@ -412,7 +412,7 @@ func TestGetLinkAnnotatedIngressInfo(t *testing.T) {
|
||||
|
||||
func TestGetIngressInfoWildCardPath(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testIngressWildCardPath, info)
|
||||
populateNodeInfo(testIngressWildCardPath, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool {
|
||||
return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0
|
||||
@@ -436,7 +436,7 @@ func TestGetIngressInfoWildCardPath(t *testing.T) {
|
||||
|
||||
func TestGetIngressInfoWithoutTls(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testIngressWithoutTls, info)
|
||||
populateNodeInfo(testIngressWithoutTls, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool {
|
||||
return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0
|
||||
@@ -481,7 +481,7 @@ func TestGetIngressInfoWithHost(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
|
||||
Ingress: []v1.LoadBalancerIngress{{IP: "107.178.210.11"}},
|
||||
@@ -514,7 +514,7 @@ func TestGetIngressInfoNoHost(t *testing.T) {
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
|
||||
TargetRefs: []v1alpha1.ResourceRef{{
|
||||
@@ -549,7 +549,7 @@ func TestExternalUrlWithSubPath(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
expectedExternalUrls := []string{"https://107.178.210.11/my/sub/path/"}
|
||||
assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs)
|
||||
@@ -585,7 +585,7 @@ func TestExternalUrlWithMultipleSubPaths(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
expectedExternalUrls := []string{"https://helm-guestbook.com/my/sub/path/", "https://helm-guestbook.com/my/sub/path/2", "https://helm-guestbook.com"}
|
||||
actualURLs := info.NetworkingInfo.ExternalURLs
|
||||
@@ -615,7 +615,7 @@ func TestExternalUrlWithNoSubPath(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
expectedExternalUrls := []string{"https://107.178.210.11"}
|
||||
assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs)
|
||||
@@ -643,8 +643,54 @@ func TestExternalUrlWithNetworkingApi(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
expectedExternalUrls := []string{"https://107.178.210.11"}
|
||||
assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs)
|
||||
}
|
||||
|
||||
func TestCustomLabel(t *testing.T) {
|
||||
configmap := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cm`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(configmap, info, []string{"my-label"})
|
||||
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
|
||||
configmap = strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cm
|
||||
labels:
|
||||
my-label: value`)
|
||||
|
||||
info = &ResourceInfo{}
|
||||
populateNodeInfo(configmap, info, []string{"my-label", "other-label"})
|
||||
|
||||
assert.Equal(t, 1, len(info.Info))
|
||||
assert.Equal(t, "my-label", info.Info[0].Name)
|
||||
assert.Equal(t, "value", info.Info[0].Value)
|
||||
|
||||
configmap = strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cm
|
||||
labels:
|
||||
my-label: value
|
||||
other-label: value2`)
|
||||
|
||||
info = &ResourceInfo{}
|
||||
populateNodeInfo(configmap, info, []string{"my-label", "other-label"})
|
||||
|
||||
assert.Equal(t, 2, len(info.Info))
|
||||
assert.Equal(t, "my-label", info.Info[0].Name)
|
||||
assert.Equal(t, "value", info.Info[0].Value)
|
||||
assert.Equal(t, "other-label", info.Info[1].Name)
|
||||
assert.Equal(t, "value2", info.Info[1].Value)
|
||||
}
|
||||
|
||||
@@ -117,7 +117,7 @@ func (c *clusterInfoUpdater) updateClusterInfo(cluster appv1.Cluster, info *cach
|
||||
}
|
||||
if info != nil {
|
||||
clusterInfo.ServerVersion = info.K8SVersion
|
||||
clusterInfo.APIVersions = argo.APIResourcesToStrings(info.APIResources, false)
|
||||
clusterInfo.APIVersions = argo.APIResourcesToStrings(info.APIResources, true)
|
||||
if info.LastCacheSyncTime == nil {
|
||||
clusterInfo.ConnectionState.Status = appv1.ConnectionStatusUnknown
|
||||
} else if info.SyncError == nil {
|
||||
|
||||
@@ -381,7 +381,7 @@ func (c *appCollector) collectApps(ch chan<- prometheus.Metric, app *argoappv1.A
|
||||
healthStatus = health.HealthStatusUnknown
|
||||
}
|
||||
|
||||
addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.Source.RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace, string(syncStatus), string(healthStatus), operation)
|
||||
addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.GetSource().RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace, string(syncStatus), string(healthStatus), operation)
|
||||
|
||||
if len(c.appLabels) > 0 {
|
||||
labelValues := []string{}
|
||||
|
||||
@@ -62,7 +62,7 @@ type managedResource struct {
|
||||
|
||||
// AppStateManager defines methods which allow to compare application spec and actual application state.
|
||||
type AppStateManager interface {
|
||||
CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revision string, source v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string) *comparisonResult
|
||||
CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) *comparisonResult
|
||||
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
|
||||
}
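
With the interface change above, CompareAppState always takes slices of revisions and sources plus a hasMultipleSources flag, and single-source callers simply wrap their one source, as the controller does earlier in this diff. A hedged usage fragment in the context of this file; types mirror the surrounding code rather than being redefined here.

// compareSingleSource shows how a single-source caller adapts to the new
// CompareAppState signature.
func compareSingleSource(m AppStateManager, app *v1alpha1.Application, project *appv1.AppProject) *comparisonResult {
	source := app.Spec.GetSource()
	revisions := []string{source.TargetRevision}
	sources := []v1alpha1.ApplicationSource{source}
	return m.CompareAppState(app, project, revisions, sources,
		false, // noCache
		false, // noRevisionCache
		nil,   // localObjects
		app.Spec.HasMultipleSources())
}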
|
||||
|
||||
@@ -75,6 +75,8 @@ type comparisonResult struct {
|
||||
reconciliationResult sync.ReconciliationResult
|
||||
diffConfig argodiff.DiffConfig
|
||||
appSourceType v1alpha1.ApplicationSourceType
|
||||
// appSourceTypes stores the SourceType for each application source under sources field
|
||||
appSourceTypes []v1alpha1.ApplicationSourceType
|
||||
// timings maps phases of comparison to the duration it took to complete (for statistical purposes)
|
||||
timings map[string]time.Duration
|
||||
diffResultList *diff.DiffResultList
|
||||
@@ -105,7 +107,8 @@ type appStateManager struct {
|
||||
persistResourceHealth bool
|
||||
}
|
||||
|
||||
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1.ApplicationSource, appLabelKey, revision string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error) {
|
||||
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, map[*v1alpha1.ApplicationSource]*apiclient.ManifestResponse, error) {
|
||||
|
||||
ts := stats.NewTimingStats()
|
||||
helmRepos, err := m.db.ListHelmRepositories(context.Background())
|
||||
if err != nil {
|
||||
@@ -115,11 +118,7 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("helm_ms")
|
||||
repo, err := m.db.GetRepository(context.Background(), source.RepoURL)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("repo_ms")
|
||||
helmRepositoryCredentials, err := m.db.GetAllHelmRepositoryCredentials(context.Background())
|
||||
if err != nil {
|
||||
@@ -129,15 +128,6 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer io.Close(conn)
|
||||
|
||||
if revision == "" {
|
||||
revision = source.TargetRevision
|
||||
}
|
||||
|
||||
plugins, err := m.settingsMgr.GetConfigManagementPlugins()
|
||||
if err != nil {
|
||||
@@ -158,48 +148,87 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
kustomizeOptions, err := kustomizeSettings.GetOptions(app.Spec.Source)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
helmOptions, err := m.settingsMgr.GetHelmSettings()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("build_options_ms")
|
||||
serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("version_ms")
|
||||
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
|
||||
Repo: repo,
|
||||
Repos: permittedHelmRepos,
|
||||
Revision: revision,
|
||||
NoCache: noCache,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &source,
|
||||
Plugins: tools,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
|
||||
VerifySignature: verifySignature,
|
||||
HelmRepoCreds: permittedHelmCredentials,
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
EnabledSourceTypes: enabledSourceTypes,
|
||||
HelmOptions: helmOptions,
|
||||
})
|
||||
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
targetObjs, err := unmarshalManifests(manifestInfo.Manifests)
|
||||
defer io.Close(conn)
|
||||
|
||||
manifestInfoMap := make(map[*v1alpha1.ApplicationSource]*apiclient.ManifestResponse)
|
||||
targetObjs := make([]*unstructured.Unstructured, 0)
|
||||
|
||||
// Store all sources that define a ref field in a map, for applications that use the sources field
|
||||
refSources, err := argo.GetRefSources(context.Background(), app.Spec, m.db)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, fmt.Errorf("failed to get ref sources: %v", err)
|
||||
}
|
||||
|
||||
for i, source := range sources {
|
||||
if len(revisions) < len(sources) || revisions[i] == "" {
|
||||
revisions[i] = source.TargetRevision
|
||||
}
|
||||
ts.AddCheckpoint("helm_ms")
|
||||
repo, err := m.db.GetRepository(context.Background(), source.RepoURL)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
kustomizeOptions, err := kustomizeSettings.GetOptions(source)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("version_ms")
|
||||
log.Debugf("Generating Manifest for source %s revision %s", source, revisions[i])
|
||||
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
|
||||
Repo: repo,
|
||||
Repos: permittedHelmRepos,
|
||||
Revision: revisions[i],
|
||||
NoCache: noCache,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &source,
|
||||
Plugins: tools,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
|
||||
VerifySignature: verifySignature,
|
||||
HelmRepoCreds: permittedHelmCredentials,
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
EnabledSourceTypes: enabledSourceTypes,
|
||||
HelmOptions: helmOptions,
|
||||
HasMultipleSources: app.Spec.HasMultipleSources(),
|
||||
RefSources: refSources,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// GenerateManifest can return empty ManifestResponse without error if app has multiple sources
|
||||
// and any of the sources has neither the path nor the chart field specified.
|
||||
// In that scenario, we continue to the next source
|
||||
if app.Spec.HasMultipleSources() && len(manifestInfo.Manifests) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
targetObj, err := unmarshalManifests(manifestInfo.Manifests)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
targetObjs = append(targetObjs, targetObj...)
|
||||
manifestInfoMap[&source] = manifestInfo
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("unmarshal_ms")
|
||||
@@ -209,7 +238,7 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
}
|
||||
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
|
||||
logCtx.Info("getRepoObjs stats")
|
||||
return targetObjs, manifestInfo, nil
|
||||
return targetObjs, manifestInfoMap, nil
|
||||
}
|
||||
|
||||
func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) {
|
||||
@@ -325,7 +354,7 @@ func verifyGnuPGSignature(revision string, project *appv1.AppProject, manifestIn
|
||||
// CompareAppState compares application git state to the live app state, using the specified
|
||||
// revision and supplied source. If revision or overrides are empty, then compares against
|
||||
// revision and overrides in the app spec.
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revision string, source v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string) *comparisonResult {
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) *comparisonResult {
|
||||
ts := stats.NewTimingStats()
|
||||
appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
|
||||
|
||||
@@ -333,12 +362,24 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
|
||||
// return unknown comparison result if basic comparison settings cannot be loaded
|
||||
if err != nil {
|
||||
return &comparisonResult{
|
||||
syncStatus: &v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{Source: source, Destination: app.Spec.Destination},
|
||||
Status: appv1.SyncStatusCodeUnknown,
|
||||
},
|
||||
healthStatus: &appv1.HealthStatus{Status: health.HealthStatusUnknown},
|
||||
if hasMultipleSources {
|
||||
return &comparisonResult{
|
||||
syncStatus: &v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{Destination: app.Spec.Destination, Sources: sources},
|
||||
Status: appv1.SyncStatusCodeUnknown,
|
||||
Revisions: revisions,
|
||||
},
|
||||
healthStatus: &appv1.HealthStatus{Status: health.HealthStatusUnknown},
|
||||
}
|
||||
} else {
|
||||
return &comparisonResult{
|
||||
syncStatus: &v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{Source: sources[0], Destination: app.Spec.Destination},
|
||||
Status: appv1.SyncStatusCodeUnknown,
|
||||
Revision: revisions[0],
|
||||
},
|
||||
healthStatus: &appv1.HealthStatus{Status: health.HealthStatusUnknown},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -356,11 +397,21 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)
|
||||
|
||||
var targetObjs []*unstructured.Unstructured
|
||||
var manifestInfo *apiclient.ManifestResponse
|
||||
now := metav1.Now()
|
||||
|
||||
var manifestInfoMap map[*v1alpha1.ApplicationSource]*apiclient.ManifestResponse
|
||||
|
||||
if len(localManifests) == 0 {
|
||||
targetObjs, manifestInfo, err = m.getRepoObjs(app, source, appLabelKey, revision, noCache, noRevisionCache, verifySignature, project)
|
||||
// If the number of revisions does not match the number of sources,
// take the target revisions directly from the sources instead.
|
||||
if len(revisions) != len(sources) {
|
||||
revisions = make([]string, 0)
|
||||
for _, source := range sources {
|
||||
revisions = append(revisions, source.TargetRevision)
|
||||
}
|
||||
}
|
||||
|
||||
targetObjs, manifestInfoMap, err = m.getRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project)
|
||||
if err != nil {
|
||||
targetObjs = make([]*unstructured.Unstructured, 0)
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
|
||||
@@ -382,7 +433,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
failedToLoadObjs = true
|
||||
}
|
||||
}
|
||||
manifestInfo = nil
|
||||
// empty out manifestInfoMap
|
||||
for as := range manifestInfoMap {
|
||||
delete(manifestInfoMap, as)
|
||||
}
|
||||
}
|
||||
ts.AddCheckpoint("git_ms")
|
||||
|
||||
@@ -416,7 +470,8 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
|
||||
failedToLoadObjs = true
|
||||
}
|
||||
logCtx.Debugf("Retrieved lived manifests")
|
||||
|
||||
logCtx.Debugf("Retrieved live manifests")
|
||||
|
||||
// filter out all resources which are not permitted in the application project
|
||||
for k, v := range liveObjByKey {
|
||||
@@ -459,10 +514,16 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
log.Warnf("Could not get compare options from ConfigMap (assuming defaults): %v", err)
|
||||
compareOptions = settings.GetDefaultDiffOptions()
|
||||
}
|
||||
manifestRevisions := make([]string, 0)
|
||||
|
||||
for _, manifestInfo := range manifestInfoMap {
|
||||
manifestRevisions = append(manifestRevisions, manifestInfo.Revision)
|
||||
}
|
||||
|
||||
// restore comparison using cached diff result if previous comparison was performed for the same revision
|
||||
revisionChanged := manifestInfo == nil || app.Status.Sync.Revision != manifestInfo.Revision
|
||||
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, appv1.ComparedTo{Source: app.Spec.Source, Destination: app.Spec.Destination})
|
||||
revisionChanged := len(manifestInfoMap) != len(sources) || !reflect.DeepEqual(app.Status.Sync.Revisions, manifestRevisions)
|
||||
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, appv1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination, Sources: sources})
|
||||
|
||||
_, refreshRequested := app.IsRefreshRequested()
|
||||
noCache = noCache || refreshRequested || app.Status.Expired(m.statusRefreshTimeout) || specChanged || revisionChanged
|
||||
|
||||
@@ -514,7 +575,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
}
|
||||
gvk := obj.GroupVersionKind()
|
||||
|
||||
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, appLabelKey, trackingMethod)
|
||||
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod)
|
||||
|
||||
resState := v1alpha1.ResourceStatus{
|
||||
Namespace: obj.GetNamespace(),
|
||||
@@ -591,16 +652,32 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
if failedToLoadObjs {
|
||||
syncCode = v1alpha1.SyncStatusCodeUnknown
|
||||
}
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{
|
||||
Source: source,
|
||||
Destination: app.Spec.Destination,
|
||||
},
|
||||
Status: syncCode,
|
||||
var revision string
|
||||
|
||||
if !hasMultipleSources && len(manifestRevisions) > 0 {
|
||||
revision = manifestRevisions[0]
|
||||
}
|
||||
if manifestInfo != nil {
|
||||
syncStatus.Revision = manifestInfo.Revision
|
||||
var syncStatus v1alpha1.SyncStatus
|
||||
if hasMultipleSources {
|
||||
syncStatus = v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
Sources: sources,
|
||||
},
|
||||
Status: syncCode,
|
||||
Revisions: manifestRevisions,
|
||||
}
|
||||
} else {
|
||||
syncStatus = v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
Source: app.Spec.GetSource(),
|
||||
},
|
||||
Status: syncCode,
|
||||
Revision: revision,
|
||||
}
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("sync_ms")
|
||||
|
||||
healthStatus, err := setApplicationHealth(managedResources, resourceSummaries, resourceOverrides, app, m.persistResourceHealth)
|
||||
@@ -611,8 +688,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
// Git has already performed the signature verification via its GPG interface, and the result is available
|
||||
// in the manifest info received from the repository server. We now need to form our opinion about the result
|
||||
// and stop processing if we do not agree about the outcome.
|
||||
if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil {
|
||||
conditions = append(conditions, verifyGnuPGSignature(revision, project, manifestInfo)...)
|
||||
for _, manifestInfo := range manifestInfoMap {
|
||||
if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil {
|
||||
conditions = append(conditions, verifyGnuPGSignature(manifestInfo.Revision, project, manifestInfo)...)
|
||||
}
|
||||
}
|
||||
|
||||
compRes := comparisonResult{
|
||||
@@ -624,9 +703,18 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
diffConfig: diffConfig,
|
||||
diffResultList: diffResults,
|
||||
}
|
||||
if manifestInfo != nil {
|
||||
compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
|
||||
|
||||
if hasMultipleSources {
|
||||
for _, manifestInfo := range manifestInfoMap {
|
||||
compRes.appSourceTypes = append(compRes.appSourceTypes, appv1.ApplicationSourceType(manifestInfo.SourceType))
|
||||
}
|
||||
} else {
|
||||
for _, manifestInfo := range manifestInfoMap {
|
||||
compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
app.Status.SetConditions(conditions, map[appv1.ApplicationConditionType]bool{
|
||||
appv1.ApplicationConditionComparisonError: true,
|
||||
appv1.ApplicationConditionSharedResourceWarning: true,
|
||||
@@ -638,18 +726,29 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
return &compRes
|
||||
}
|
||||
|
||||
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, startedAt metav1.Time) error {
|
||||
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, revisions []string, sources []v1alpha1.ApplicationSource, hasMultipleSources bool, startedAt metav1.Time) error {
|
||||
var nextID int64
|
||||
if len(app.Status.History) > 0 {
|
||||
nextID = app.Status.History.LastRevisionHistory().ID + 1
|
||||
}
|
||||
app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
|
||||
Revision: revision,
|
||||
DeployedAt: metav1.NewTime(time.Now().UTC()),
|
||||
DeployStartedAt: &startedAt,
|
||||
ID: nextID,
|
||||
Source: source,
|
||||
})
|
||||
|
||||
if hasMultipleSources {
|
||||
app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
|
||||
DeployedAt: metav1.NewTime(time.Now().UTC()),
|
||||
DeployStartedAt: &startedAt,
|
||||
ID: nextID,
|
||||
Sources: sources,
|
||||
Revisions: revisions,
|
||||
})
|
||||
} else {
|
||||
app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
|
||||
Revision: revision,
|
||||
DeployedAt: metav1.NewTime(time.Now().UTC()),
|
||||
DeployStartedAt: &startedAt,
|
||||
ID: nextID,
|
||||
Source: source,
|
||||
})
|
||||
}
|
||||
|
||||
app.Status.History = app.Status.History.Trunc(app.Spec.GetRevisionHistoryLimit())
|
||||
|
||||
@@ -699,12 +798,13 @@ func NewAppStateManager(
|
||||
}
|
||||
|
||||
// isSelfReferencedObj returns whether the given obj is managed by the application
|
||||
// according to the values in the tracking annotation. It returns true when all
|
||||
// of the properties in the annotation (name, namespace, group and kind) match
|
||||
// the properties of the inspected object, or if the tracking method used does
|
||||
// not provide the required properties for matching.
|
||||
func (m *appStateManager) isSelfReferencedObj(obj *unstructured.Unstructured, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
|
||||
if obj == nil {
|
||||
// according to the values of the tracking id (aka app instance value) annotation.
|
||||
// It returns true when all of the properties of the tracking id (app name, namespace,
|
||||
// group and kind) match the properties of the live object, or if the tracking method
|
||||
// used does not provide the required properties for matching.
|
||||
// Reference: https://github.com/argoproj/argo-cd/issues/8683
|
||||
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
|
||||
if live == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -714,17 +814,42 @@ func (m *appStateManager) isSelfReferencedObj(obj *unstructured.Unstructured, ap
|
||||
return true
|
||||
}
|
||||
|
||||
// In order for us to assume obj to be managed by this application, the
|
||||
// values from the annotation have to match the properties from the live
|
||||
// object. Cluster scoped objects carry the app's destination namespace
|
||||
// in the tracking annotation, but are unique in GVK + name combination.
|
||||
appInstance := m.resourceTracking.GetAppInstance(obj, appLabelKey, trackingMethod)
|
||||
if appInstance != nil {
|
||||
return (obj.GetNamespace() == appInstance.Namespace || obj.GetNamespace() == "") &&
|
||||
obj.GetName() == appInstance.Name &&
|
||||
obj.GetObjectKind().GroupVersionKind().Group == appInstance.Group &&
|
||||
obj.GetObjectKind().GroupVersionKind().Kind == appInstance.Kind
|
||||
	// config != nil is the best-case scenario for constructing an accurate
	// Tracking ID. `config` is the "desired state" (from git/helm/etc.).
	// Using the desired state is important when there is an ApiGroup upgrade.
	// When upgrading, the comparison must be made with the new tracking ID.
	// Example:
	// live resource annotation will be:
	// ingress-app:extensions/Ingress:default/some-ingress
	// when it should be:
	// ingress-app:networking.k8s.io/Ingress:default/some-ingress
	// More details in: https://github.com/argoproj/argo-cd/pull/11012
	var aiv argo.AppInstanceValue
	if config != nil {
		aiv = argo.UnstructuredToAppInstanceValue(config, appName, "")
		return isSelfReferencedObj(live, aiv)
	}

	// If config is nil then compare the live resource with the value
	// of the annotation. In this case, in order to validate if obj is
	// managed by this application, the values from the annotation have
	// to match the properties from the live object. Cluster scoped objects
	// carry the app's destination namespace in the tracking annotation,
	// but are unique in GVK + name combination.
	appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod)
	if appInstance != nil {
		return isSelfReferencedObj(live, *appInstance)
	}
	return true
}

// isSelfReferencedObj returns true if the given Tracking ID (`aiv`) matches
// the given object. It returns false when the ID doesn't match. This sometimes
// happens when a tracking label or annotation gets accidentally copied to a
// different resource.
func isSelfReferencedObj(obj *unstructured.Unstructured, aiv argo.AppInstanceValue) bool {
	return (obj.GetNamespace() == aiv.Namespace || obj.GetNamespace() == "") &&
		obj.GetName() == aiv.Name &&
		obj.GetObjectKind().GroupVersionKind().Group == aiv.Group &&
		obj.GetObjectKind().GroupVersionKind().Kind == aiv.Kind
}
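A minimal illustrative sketch (not part of this change set) of why the tracking ID is built from the desired-state object when it is available: after an APIGroup upgrade the live Ingress still carries the old "extensions" group in its tracking annotation, while an ID derived from the desired state already uses "networking.k8s.io", so the comparison keeps matching. The manifest content below is assumed for illustration only.

package main

import (
	"fmt"

	"github.com/argoproj/argo-cd/v2/util/argo"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Desired state of the Ingress after the manifest moved to networking.k8s.io/v1
	// (values are assumed for this example).
	config := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "networking.k8s.io/v1",
		"kind":       "Ingress",
		"metadata": map[string]interface{}{
			"name":      "some-ingress",
			"namespace": "default",
		},
	}}
	// The tracking ID built from the desired state carries the new group, independent
	// of whatever stale group the live object's tracking annotation still records.
	aiv := argo.UnstructuredToAppInstanceValue(config, "ingress-app", "")
	fmt.Printf("%s/%s %s/%s\n", aiv.Group, aiv.Kind, aiv.Namespace, aiv.Name)
}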
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -37,7 +38,11 @@ func TestCompareAppStateEmpty(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -60,7 +65,11 @@ func TestCompareAppStateMissing(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
@@ -87,7 +96,11 @@ func TestCompareAppStateExtra(t *testing.T) {
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
assert.Equal(t, 1, len(compRes.resources))
|
||||
@@ -113,7 +126,11 @@ func TestCompareAppStateHook(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
assert.Equal(t, 0, len(compRes.resources))
|
||||
@@ -140,7 +157,11 @@ func TestCompareAppStateSkipHook(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
assert.Equal(t, 1, len(compRes.resources))
|
||||
@@ -166,7 +187,11 @@ func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) {
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -194,7 +219,11 @@ func TestCompareAppStateExtraHook(t *testing.T) {
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -237,7 +266,11 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, 1, len(app.Status.Conditions))
|
||||
@@ -288,7 +321,11 @@ func TestSetHealth(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
|
||||
}
|
||||
@@ -320,7 +357,11 @@ func TestSetHealthSelfReferencedApp(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
|
||||
}
|
||||
@@ -390,7 +431,11 @@ func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.Equal(t, health.HealthStatusUnknown, compRes.healthStatus.Status)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
|
||||
@@ -437,7 +482,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
|
||||
app.Spec.RevisionHistoryLimit = &i
|
||||
}
|
||||
addHistory := func() {
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, metav1.Time{})
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1.Time{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
addHistory()
|
||||
@@ -473,7 +518,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
|
||||
assert.Len(t, app.Status.History, 9)
|
||||
|
||||
metav1NowTime := metav1.NewTime(time.Now())
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, metav1NowTime)
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1NowTime)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, app.Status.History.LastRevisionHistory().DeployStartedAt, &metav1NowTime)
|
||||
}
|
||||
@@ -527,7 +572,11 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -549,7 +598,11 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -578,7 +631,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -600,7 +657,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -622,7 +683,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -644,7 +709,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -669,7 +738,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
ctrl := newFakeController(&data)
|
||||
testProj := signedProj
|
||||
testProj.Spec.SignatureKeys[0].KeyID = "4AEE18F83AFDEB24"
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &testProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -694,7 +767,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
// it doesn't matter for our test whether local manifests are valid
|
||||
localManifests := []string{"foobar"}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, localManifests)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
|
||||
@@ -719,7 +796,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -728,7 +809,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
assert.Len(t, app.Status.Conditions, 0)
|
||||
}
|
||||
|
||||
// Signature required and local manifests supplied and GPG subystem is disabled - sync
|
||||
// Signature required and local manifests supplied and GPG subsystem is disabled - sync
|
||||
{
|
||||
app := newFakeApp()
|
||||
data := fakeData{
|
||||
@@ -744,7 +825,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
// it doesn't matter for our test whether local manifests are valid
|
||||
localManifests := []string{""}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, localManifests)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -852,6 +937,19 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
},
|
||||
},
|
||||
})
|
||||
managedWrongAPIGroup := kube.MustToUnstructured(&networkingv1.Ingress{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "networking.k8s.io/v1",
|
||||
Kind: "Ingress",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "some-ingress",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
common.AnnotationKeyAppInstance: "guestbook:extensions/Ingress:default/some-ingress",
|
||||
},
|
||||
},
|
||||
})
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
@@ -870,30 +968,69 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
})
|
||||
|
||||
manager := ctrl.appStateManager.(*appStateManager)
|
||||
appName := "guestbook"
|
||||
|
||||
// Managed resource w/ annotations
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
t.Run("will return true if trackingid matches the resource", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
configObj := managedObj.DeepCopy()
|
||||
|
||||
// Managed resource w/ label
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will return true if tracked with label", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
configObj := managedObjWithLabel.DeepCopy()
|
||||
|
||||
// Wrong resource name
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong resource name and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// Wrong resource group
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong resource group and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// Wrong resource kind
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong kind and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// Wrong resource namespace
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong namespace and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// Nil resource
|
||||
assert.True(t, manager.isSelfReferencedObj(nil, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel))
|
||||
})
|
||||
t.Run("will return true if live is nil", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
|
||||
t.Run("will handle upgrade in desired state APIGroup", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
config := managedWrongAPIGroup.DeepCopy()
|
||||
delete(config.GetAnnotations(), common.AnnotationKeyAppInstance)
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
cdcommon "github.com/argoproj/argo-cd/v2/common"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/sync"
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
@@ -20,7 +22,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/managedfields"
|
||||
"k8s.io/kubectl/pkg/util/openapi"
|
||||
|
||||
cdcommon "github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/controller/metrics"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
listersv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
@@ -65,6 +66,8 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
var syncOp v1alpha1.SyncOperation
|
||||
var syncRes *v1alpha1.SyncOperationResult
|
||||
var source v1alpha1.ApplicationSource
|
||||
var sources []v1alpha1.ApplicationSource
|
||||
revisions := make([]string, 0)
|
||||
|
||||
if state.Operation.Sync == nil {
|
||||
state.Phase = common.OperationFailed
|
||||
@@ -82,31 +85,53 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return
|
||||
}
|
||||
|
||||
if syncOp.Source == nil {
|
||||
// normal sync case (where source is taken from app.spec.source)
|
||||
source = app.Spec.Source
|
||||
if syncOp.Source == nil || (syncOp.Sources != nil && len(syncOp.Sources) > 0) {
|
||||
// normal sync case (where source is taken from app.spec.sources)
|
||||
if app.Spec.HasMultipleSources() {
|
||||
sources = app.Spec.Sources
|
||||
} else {
|
||||
// normal sync case (where source is taken from app.spec.source)
|
||||
source = app.Spec.GetSource()
|
||||
sources = make([]v1alpha1.ApplicationSource, 0)
|
||||
}
|
||||
} else {
|
||||
// rollback case
|
||||
source = *state.Operation.Sync.Source
|
||||
if app.Spec.HasMultipleSources() {
|
||||
sources = state.Operation.Sync.Sources
|
||||
} else {
|
||||
source = *state.Operation.Sync.Source
|
||||
sources = make([]v1alpha1.ApplicationSource, 0)
|
||||
}
|
||||
}
|
||||
|
||||
if state.SyncResult != nil {
|
||||
syncRes = state.SyncResult
|
||||
revision = state.SyncResult.Revision
|
||||
revisions = append(revisions, state.SyncResult.Revisions...)
|
||||
} else {
|
||||
syncRes = &v1alpha1.SyncOperationResult{}
|
||||
// status.operationState.syncResult.source must be set properly since auto-sync relies
// on this information to decide if it should sync (i.e. whether the source differs from
// the last sync attempt)
|
||||
syncRes.Source = source
|
||||
if app.Spec.HasMultipleSources() {
|
||||
syncRes.Sources = sources
|
||||
} else {
|
||||
syncRes.Source = source
|
||||
}
|
||||
state.SyncResult = syncRes
|
||||
}
|
||||
|
||||
if revision == "" {
|
||||
// if we get here, it means we did not remember a commit SHA which we should be syncing to.
|
||||
// This typically indicates we are just about to begin a brand new sync/rollback operation.
|
||||
// Take the value in the requested operation. We will resolve this to a SHA later.
|
||||
revision = syncOp.Revision
|
||||
// if we get here, it means we did not remember a commit SHA which we should be syncing to.
|
||||
// This typically indicates we are just about to begin a brand new sync/rollback operation.
|
||||
// Take the value in the requested operation. We will resolve this to a SHA later.
|
||||
if app.Spec.HasMultipleSources() {
|
||||
if len(revisions) != len(sources) {
|
||||
revisions = syncOp.Revisions
|
||||
}
|
||||
} else {
|
||||
if revision == "" {
|
||||
revision = syncOp.Revision
|
||||
}
|
||||
}
|
||||
|
||||
proj, err := argo.GetAppProject(app, listersv1alpha1.NewAppProjectLister(m.projInformer.GetIndexer()), m.namespace, m.settingsMgr, m.db, context.TODO())
|
||||
@@ -116,10 +141,23 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return
|
||||
}
|
||||
|
||||
compareResult := m.CompareAppState(app, proj, revision, source, false, true, syncOp.Manifests)
|
||||
if app.Spec.HasMultipleSources() {
|
||||
revisions = syncRes.Revisions
|
||||
} else {
|
||||
revisions = append(revisions, revision)
|
||||
}
|
||||
|
||||
if !app.Spec.HasMultipleSources() {
|
||||
sources = []v1alpha1.ApplicationSource{source}
|
||||
revisions = []string{revision}
|
||||
}
|
||||
|
||||
compareResult := m.CompareAppState(app, proj, revisions, sources, false, true, syncOp.Manifests, app.Spec.HasMultipleSources())
|
||||
// We now have a concrete commit SHA. Save this in the sync result revision so that we remember
|
||||
// what we should be syncing to when resuming operations.
|
||||
|
||||
syncRes.Revision = compareResult.syncStatus.Revision
|
||||
syncRes.Revisions = compareResult.syncStatus.Revisions
|
||||
|
||||
// If there are any comparison or spec error conditions, do not perform the operation
|
||||
if errConditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{
|
||||
@@ -212,14 +250,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
}
|
||||
trackingMethod := argo.GetTrackingMethod(m.settingsMgr)
|
||||
|
||||
syncCtx, cleanup, err := sync.NewSyncContext(
|
||||
compareResult.syncStatus.Revision,
|
||||
reconciliationResult,
|
||||
restConfig,
|
||||
rawConfig,
|
||||
m.kubectl,
|
||||
app.Spec.Destination.Namespace,
|
||||
openAPISchema,
|
||||
opts := []sync.SyncOpt{
|
||||
sync.WithLogr(logutils.NewLogrusLogger(logEntry)),
|
||||
sync.WithHealthOverride(lua.ResourceHealthOverrides(resourceOverrides)),
|
||||
sync.WithPermissionValidator(func(un *unstructured.Unstructured, res *v1.APIResource) error {
|
||||
@@ -246,16 +277,9 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
sync.WithResourcesFilter(func(key kube.ResourceKey, target *unstructured.Unstructured, live *unstructured.Unstructured) bool {
|
||||
return (len(syncOp.Resources) == 0 ||
|
||||
argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) &&
|
||||
m.isSelfReferencedObj(live, appLabelKey, trackingMethod)
|
||||
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod)
|
||||
}),
|
||||
sync.WithManifestValidation(!syncOp.SyncOptions.HasOption(common.SyncOptionsDisableValidation)),
|
||||
sync.WithNamespaceCreation(syncOp.SyncOptions.HasOption("CreateNamespace=true"), func(un *unstructured.Unstructured) bool {
|
||||
if un != nil && kube.GetAppInstanceLabel(un, cdcommon.LabelKeyAppInstance) != "" {
|
||||
kube.UnsetLabel(un, cdcommon.LabelKeyAppInstance)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}),
|
||||
sync.WithSyncWaveHook(delayBetweenSyncWaves),
|
||||
sync.WithPruneLast(syncOp.SyncOptions.HasOption(common.SyncOptionPruneLast)),
|
||||
sync.WithResourceModificationChecker(syncOp.SyncOptions.HasOption("ApplyOutOfSyncOnly=true"), compareResult.diffResultList),
|
||||
@@ -263,6 +287,21 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
sync.WithReplace(syncOp.SyncOptions.HasOption(common.SyncOptionReplace)),
|
||||
sync.WithServerSideApply(syncOp.SyncOptions.HasOption(common.SyncOptionServerSideApply)),
|
||||
sync.WithServerSideApplyManager(cdcommon.ArgoCDSSAManager),
|
||||
}
|
||||
|
||||
if syncOp.SyncOptions.HasOption("CreateNamespace=true") {
|
||||
opts = append(opts, sync.WithNamespaceModifier(syncNamespace(m.resourceTracking, appLabelKey, trackingMethod, app.Name, app.Spec.SyncPolicy)))
|
||||
}
|
||||
|
||||
syncCtx, cleanup, err := sync.NewSyncContext(
|
||||
compareResult.syncStatus.Revision,
|
||||
reconciliationResult,
|
||||
restConfig,
|
||||
rawConfig,
|
||||
m.kubectl,
|
||||
app.Spec.Destination.Namespace,
|
||||
openAPISchema,
|
||||
opts...,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
@@ -301,7 +340,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
logEntry.WithField("duration", time.Since(start)).Info("sync/terminate complete")
|
||||
|
||||
if !syncOp.DryRun && len(syncOp.Resources) == 0 && state.Phase.Successful() {
|
||||
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, state.StartedAt)
|
||||
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, compareResult.syncStatus.Revisions, compareResult.syncStatus.ComparedTo.Sources, app.Spec.HasMultipleSources(), state.StartedAt)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("failed to record sync to history: %v", err)
|
||||
|
||||
55
controller/sync_namespace.go
Normal file
@@ -0,0 +1,55 @@
package controller

import (
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v2/util/argo"
	gitopscommon "github.com/argoproj/gitops-engine/pkg/sync/common"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// syncNamespace determines whether Argo CD should create and/or manage the namespace
// where the application will be deployed.
func syncNamespace(resourceTracking argo.ResourceTracking, appLabelKey string, trackingMethod v1alpha1.TrackingMethod, appName string, syncPolicy *v1alpha1.SyncPolicy) func(m, l *unstructured.Unstructured) (bool, error) {
	// This function must return true for the managed namespace to be synced.
	return func(managedNs, liveNs *unstructured.Unstructured) (bool, error) {
		if managedNs == nil {
			return false, nil
		}

		isNewNamespace := liveNs == nil
		isManagedNamespace := syncPolicy != nil && syncPolicy.ManagedNamespaceMetadata != nil

		// should only sync the namespace if it doesn't exist in k8s or if
		// syncPolicy is defined to manage the metadata
		if !isManagedNamespace && !isNewNamespace {
			return false, nil
		}

		if isManagedNamespace {
			managedNamespaceMetadata := syncPolicy.ManagedNamespaceMetadata
			managedNs.SetLabels(managedNamespaceMetadata.Labels)
			// managedNamespaceMetadata relies on SSA in order to avoid overriding
			// existing labels and annotations in namespaces
			managedNs.SetAnnotations(appendSSAAnnotation(managedNamespaceMetadata.Annotations))
		}

		// TODO: https://github.com/argoproj/argo-cd/issues/11196
		// err := resourceTracking.SetAppInstance(managedNs, appLabelKey, appName, "", trackingMethod)
		// if err != nil {
		// 	return false, fmt.Errorf("failed to set app instance tracking on the namespace %s: %s", managedNs.GetName(), err)
		// }

		return true, nil
	}
}

// appendSSAAnnotation will set the managed namespace to be synced
// with server-side apply
func appendSSAAnnotation(in map[string]string) map[string]string {
	r := map[string]string{}
	for k, v := range in {
		r[k] = v
	}
	r[gitopscommon.AnnotationSyncOptions] = gitopscommon.SyncOptionServerSideApply
	return r
}
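A brief usage sketch (not part of the change set) of appendSSAAnnotation's copy-then-annotate behaviour, written in the style of the test file that follows; the annotation key and value match the expectations used in those tests, and the test name is hypothetical.

package controller

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func Test_appendSSAAnnotation_copiesInput(t *testing.T) {
	in := map[string]string{"my-cool-annotation": "some-value"}
	out := appendSSAAnnotation(in)

	// The returned map gains the server-side apply sync-option entry...
	assert.Equal(t, "ServerSideApply=true", out["argocd.argoproj.io/sync-options"])
	// ...while the caller's original map is left untouched.
	assert.Len(t, in, 1)
	assert.Len(t, out, 2)
}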
261
controller/sync_namespace_test.go
Normal file
@@ -0,0 +1,261 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func createFakeNamespace(uid string, resourceVersion string, labels map[string]string, annotations map[string]string) *unstructured.Unstructured {
|
||||
un := unstructured.Unstructured{}
|
||||
un.SetUID(types.UID(uid))
|
||||
un.SetResourceVersion(resourceVersion)
|
||||
un.SetLabels(labels)
|
||||
un.SetAnnotations(annotations)
|
||||
un.SetKind("Namespace")
|
||||
un.SetName("some-namespace")
|
||||
return &un
|
||||
}
|
||||
|
||||
func Test_shouldNamespaceSync(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
syncPolicy *v1alpha1.SyncPolicy
|
||||
managedNs *unstructured.Unstructured
|
||||
liveNs *unstructured.Unstructured
|
||||
expected bool
|
||||
expectedLabels map[string]string
|
||||
expectedAnnotations map[string]string
|
||||
}{
|
||||
{
|
||||
name: "liveNs is nil and syncPolicy is nil",
|
||||
expected: false,
|
||||
managedNs: nil,
|
||||
liveNs: nil,
|
||||
syncPolicy: nil,
|
||||
},
|
||||
{
|
||||
name: "liveNs is nil and syncPolicy is not nil",
|
||||
expected: false,
|
||||
managedNs: nil,
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "liveNs is nil and syncPolicy has labels and annotations",
|
||||
expected: false,
|
||||
managedNs: nil,
|
||||
liveNs: nil,
|
||||
expectedLabels: map[string]string{"my-cool-label": "some-value"},
|
||||
expectedAnnotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{"my-cool-label": "some-value"},
|
||||
Annotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata nil",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{},
|
||||
expectedAnnotations: map[string]string{},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata not nil",
|
||||
expected: true,
|
||||
expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
			managedNs:  createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:     nil,
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{},
			},
		},
		{
			name:                "namespace does not yet exist and managedNamespaceMetadata has empty labels map",
			expected:            true,
			expectedLabels:      map[string]string{},
			expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              nil,
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels: map[string]string{},
				},
			},
		},
		{
			name:                "namespace does not yet exist and managedNamespaceMetadata has empty annotations map",
			expected:            true,
			expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              nil,
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Annotations: map[string]string{},
				},
			},
		},
		{
			name:                "namespace does not yet exist and managedNamespaceMetadata has empty annotations and labels map",
			expected:            true,
			expectedLabels:      map[string]string{},
			expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              nil,
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels:      map[string]string{},
					Annotations: map[string]string{},
				},
			},
		},
		{
			name:                "namespace does not yet exist and managedNamespaceMetadata has labels",
			expected:            true,
			expectedLabels:      map[string]string{"my-cool-label": "some-value"},
			expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              nil,
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels:      map[string]string{"my-cool-label": "some-value"},
					Annotations: nil,
				},
			},
		},
		{
			name:                "namespace does not yet exist and managedNamespaceMetadata has annotations",
			expected:            true,
			expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              nil,
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels:      nil,
					Annotations: map[string]string{"my-cool-annotation": "some-value"},
				},
			},
		},
		{
			name:                "namespace does not yet exist and managedNamespaceMetadata has annotations and labels",
			expected:            true,
			expectedLabels:      map[string]string{"my-cool-label": "some-value"},
			expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              nil,
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels:      map[string]string{"my-cool-label": "some-value"},
					Annotations: map[string]string{"my-cool-annotation": "some-value"},
				},
			},
		},
		{
			name:                "namespace exists with no labels or annotations and managedNamespaceMetadata has labels",
			expected:            true,
			expectedLabels:      map[string]string{"my-cool-label": "some-value"},
			expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              createFakeNamespace("something", "1", map[string]string{}, map[string]string{}),
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels: map[string]string{"my-cool-label": "some-value"},
				},
			},
		},
		{
			name:                "namespace exists with no labels or annotations and managedNamespaceMetadata has annotations",
			expected:            true,
			expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              createFakeNamespace("something", "1", map[string]string{}, map[string]string{}),
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Annotations: map[string]string{"my-cool-annotation": "some-value"},
				},
			},
		},
		{
			name:                "namespace exists with no labels or annotations and managedNamespaceMetadata has annotations and labels",
			expected:            true,
			expectedLabels:      map[string]string{"my-cool-label": "some-value"},
			expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              createFakeNamespace("something", "1", map[string]string{}, map[string]string{}),
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels:      map[string]string{"my-cool-label": "some-value"},
					Annotations: map[string]string{"my-cool-annotation": "some-value"},
				},
			},
		},
		{
			name:                "namespace exists with labels and managedNamespaceMetadata has mismatching labels",
			expected:            true,
			expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			expectedLabels:      map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              createFakeNamespace("something", "1", map[string]string{"my-cool-label": "some-value"}, map[string]string{}),
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels:      map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"},
					Annotations: map[string]string{},
				},
			},
		},
		{
			name:                "namespace exists with annotations and managedNamespaceMetadata has mismatching annotations",
			expected:            true,
			expectedLabels:      map[string]string{},
			expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              createFakeNamespace("something", "1", map[string]string{}, map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value"}),
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels:      map[string]string{},
					Annotations: map[string]string{"my-cool-annotation": "some-value"},
				},
			},
		},
		{
			name:                "namespace exists with annotations and labels managedNamespaceMetadata has mismatching annotations and labels",
			expected:            true,
			expectedLabels:      map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"},
			expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
			managedNs:           createFakeNamespace("", "", map[string]string{}, map[string]string{}),
			liveNs:              createFakeNamespace("something", "1", map[string]string{"my-cool-label": "some-value"}, map[string]string{"my-cool-annotation": "some-value"}),
			syncPolicy: &v1alpha1.SyncPolicy{
				ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
					Labels:      map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"},
					Annotations: map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value"},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual, err := syncNamespace(argo.NewResourceTracking(), common.LabelKeyAppInstance, argo.TrackingMethodAnnotation, "some-app", tt.syncPolicy)(tt.managedNs, tt.liveNs)
			assert.NoError(t, err)

			if tt.managedNs != nil {
				assert.Equal(t, tt.expectedLabels, tt.managedNs.GetLabels())
				assert.Equal(t, tt.expectedAnnotations, tt.managedNs.GetAnnotations())
			}

			assert.Equalf(t, tt.expected, actual, "syncNamespace(%v)", tt.syncPolicy)
		})
	}
}
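The sync policy these table-driven cases exercise corresponds to the `managedNamespaceMetadata` block of an Application's sync policy. As an illustrative sketch (field names taken from the test fixtures above; the full reference appears in the `application.yaml` documentation later in this changeset):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: some-app
  namespace: argocd
spec:
  # ... source, destination and project omitted ...
  syncPolicy:
    syncOptions:
    - CreateNamespace=true        # managedNamespaceMetadata is only honored together with CreateNamespace=true
    managedNamespaceMetadata:
      labels:
        my-cool-label: some-value
      annotations:
        my-cool-annotation: some-value
```

As the expected annotations in the test cases show, the generated namespace also carries the `argocd.argoproj.io/sync-options: ServerSideApply=true` annotation added by the controller.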
@@ -50,12 +50,12 @@ func TestPersistRevisionHistory(t *testing.T) {
	}}
	ctrl.appStateManager.SyncAppState(app, opState)
	// Ensure we record spec.source into sync result
-	assert.Equal(t, app.Spec.Source, opState.SyncResult.Source)
+	assert.Equal(t, app.Spec.GetSource(), opState.SyncResult.Source)

	updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, v1.GetOptions{})
	assert.Nil(t, err)
	assert.Equal(t, 1, len(updatedApp.Status.History))
-	assert.Equal(t, app.Spec.Source, updatedApp.Status.History[0].Source)
+	assert.Equal(t, app.Spec.GetSource(), updatedApp.Status.History[0].Source)
	assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
}
BIN docs/assets/repo-add-google-cloud-source.png (new binary file, 107 KiB; content not shown)
BIN (modified binary image; 60 KiB before, 55 KiB after; content not shown)
@@ -1,15 +0,0 @@
|
||||
Money given to the Argo CD project as part of the Internet Bug Bounty program is used in three ways:
|
||||
|
||||
1. To reward CVE patch contributors
|
||||
2. To offer bounties on security enhancements (as announced by label/comment on Issues)
|
||||
3. To sponsor security-relevant dependencies
|
||||
|
||||
If someone’s primary full-time job responsibility is to work on Argo CD, then their eligibility to receive this money is limited. (Determining this is up to the maintainers’ discretion. Someone who contributes an average of three commits per week during work hours probably meets the definition. A first-time contributor who uses Argo CD daily as an SRE does not.)
|
||||
|
||||
A full-time Argo CD author is not eligible to receive rewards for CVE patch contributions. This avoids any risk of the appearance that a full-time Argo CD author is incentivized to introduce CVEs.
|
||||
|
||||
A full-time Argo CD author is eligible to receive bounties for security enhancements if and only if the vast majority of the work is done in their free time (non-work hours). Busy work like resolving merge conflicts during work hours is acceptable (to avoid over-burdening the process).
|
||||
|
||||
An Argo CD dependency is eligible to receive donations if it is listed in the Argo CD SBOM or if it is a binary invoked by Argo CD (like Helm). The dependency is not eligible for donations if a full-time Argo CD author is the primary author of the dependency.
|
||||
|
||||
Offers and transfers of rewards, bounties, and donations will be made from time to time by the Argo CD maintainers, based on the current project needs and the amount of money available from IBB. The process should be lightweight and consensus-based for now. If necessary, a more structured system can be established based on experience gained from early rewards/bounties/donations
|
||||
@@ -65,12 +65,12 @@ make builder-image IMAGE_NAMESPACE=argoproj IMAGE_TAG=v1.0.0
|
||||
|
||||
## Public CD
|
||||
|
||||
Every commit to master is built and published to `ghcr.io/argoproj/argocd:<version>-<short-sha>`. The list of images is available at
|
||||
Every commit to master is built and published to `ghcr.io/argoproj/argo-cd/argocd:<version>-<short-sha>`. The list of images is available at
|
||||
https://github.com/argoproj/argo-cd/packages.
|
||||
|
||||
!!! note
|
||||
GitHub docker registry [requires](https://github.community/t5/GitHub-Actions/docker-pull-from-public-GitHub-Package-Registry-fail-with-quot/m-p/32888#M1294) authentication to read
|
||||
even publicly available packages. Follow the steps from Kubernetes [documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry)
|
||||
to configure image pull secret if you want to use `ghcr.io/argoproj/argocd` image.
|
||||
to configure image pull secret if you want to use `ghcr.io/argoproj/argo-cd/argocd` image.
|
||||
|
||||
The image is automatically deployed to the dev Argo CD instance: [https://cd.apps.argoproj.io/](https://cd.apps.argoproj.io/)
|
||||
|
||||
@@ -103,7 +103,7 @@ Design documents are usually submitted as PR and use [this template](https://git
|
||||
|
||||
Our community regularly meets virtually to discuss issues, ideas, and enhancements around Argo CD. We invite you to join these virtual meetings if you want to bring up certain topics (including your enhancement proposals), participate in our triaging, or just get to know other contributors.
|
||||
|
||||
The current cadence of our meetings is weekly, every Thursday at 4pm UTC (9am Pacific, 12pm Eastern, 6pm Central European, 9:30pm Indian). We use Zoom to conduct these meetings.
|
||||
The current cadence of our meetings is weekly, every Thursday at 4:15pm UTC (8:15am Pacific, 11:15am Eastern, 5:15pm Central European, 9:45pm Indian). We use Zoom to conduct these meetings.
|
||||
|
||||
* [Agenda document (Google Docs, includes Zoom link)](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
|
||||
|
||||
|
||||
docs/developer-guide/contributors-quickstart.md (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
# Contributors Quick-Start
|
||||
|
||||
This guide is a starting point for first-time contributors running Argo CD locally for the first time.
|
||||
|
||||
It skips advanced topics such as codegen, which are covered in the [running locally guide](running-locally.md)
|
||||
and the [toolchain guide](toolchain-guide.md).
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Install Go
|
||||
|
||||
- Install version 1.18 or newer (Verify version by running `go version`)
|
||||
|
||||
- Get current value of `GOPATH` env:
|
||||
```shell
|
||||
go env | grep path
|
||||
```
|
||||
- Change directory into that path
|
||||
```shell
|
||||
cd <path>
|
||||
```
|
||||
|
||||
### Clone the Argo CD repo
|
||||
|
||||
```shell
|
||||
mkdir -p src/github.com/argoproj/ &&
|
||||
cd src/github.com/argoproj &&
|
||||
git clone https://github.com/argoproj/argo-cd.git
|
||||
```
|
||||
|
||||
### Install Docker
|
||||
|
||||
<https://docs.docker.com/engine/install/>
|
||||
|
||||
### Install or Upgrade `kind` (Optional - Should work with any local cluster)
|
||||
|
||||
<https://kind.sigs.k8s.io/docs/user/quick-start/>
|
||||
|
||||
### Start Your Local Cluster
|
||||
|
||||
```shell
|
||||
kind create cluster
|
||||
```
|
||||
|
||||
### Install Argo CD
|
||||
|
||||
```shell
|
||||
kubectl create namespace argocd &&
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/master/manifests/install.yaml
|
||||
```
|
||||
|
||||
Set kubectl config to avoid specifying the namespace in every kubectl command.
|
||||
All following commands in this guide assume the namespace is already set.
|
||||
|
||||
```shell
|
||||
kubectl config set-context --current --namespace=argocd
|
||||
```
|
||||
|
||||
### Install `yarn`
|
||||
|
||||
<https://classic.yarnpkg.com/lang/en/docs/install/>
|
||||
|
||||
### Install `goreman`
|
||||
|
||||
<https://github.com/mattn/goreman#getting-started>
|
||||
|
||||
### Run Argo CD
|
||||
|
||||
```shell
|
||||
cd argo-cd
|
||||
make start-local ARGOCD_GPG_ENABLED=false
|
||||
```
|
||||
|
||||
- Navigate to <http://localhost:4000> to open the Argo CD UI in your browser
|
||||
- It may take a few minutes for the UI to be responsive
|
||||
|
||||
!!! note
|
||||
If the UI is not working, check the logs from `make start-local`. The logs are `DEBUG` level by default. If the logs are
|
||||
too noisy to find the problem, try editing log levels for the commands in the `Procfile` in the root of the Argo CD repo.
|
||||
|
||||
## Making Changes
|
||||
|
||||
### UI Changes
|
||||
|
||||
Modifying the User-Interface (by editing .tsx or .scss files) auto-reloads the changes on port 4000.
|
||||
|
||||
### Backend Changes
|
||||
|
||||
Modifying the API server, repo server, or a controller requires restarting the current `make start-local` session to reflect the changes.
|
||||
|
||||
### CLI Changes
|
||||
|
||||
Modifying the CLI requires restarting the current `make start-local` session to reflect the changes.
|
||||
|
||||
To test most CLI commands, you will need to log in.
|
||||
|
||||
First, get the auto-generated secret:
|
||||
|
||||
```shell
|
||||
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo
|
||||
```
|
||||
|
||||
Then log in using that password and username `admin`:
|
||||
|
||||
```shell
|
||||
dist/argocd login localhost:8080
|
||||
```
|
||||
|
||||
---
|
||||
Congrats on making it to the end of this runbook! 🚀
|
||||
|
||||
For more on Argo CD, find us in Slack - <https://slack.cncf.io/> [#argo-contributors](https://cloud-native.slack.com/archives/C020XM04CUW)
|
||||
@@ -20,13 +20,15 @@ curl -sSfL https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/i
|
||||
## Connect
|
||||
Connect to one of the services, for example, to debug the main ArgoCD server run:
|
||||
```shell
|
||||
kubectl config set-context --current --namespace argocd
|
||||
telepresence helm install # Installs telepresence into your cluster
|
||||
telepresence connect # Starts the connection to your cluster
|
||||
telepresence intercept argocd-server --port 8083:8083 --port 8080:8080 --env-file .envrc.remote --namespace argocd # Starts the interception
|
||||
telepresence connect # Starts the connection to your cluster (bound to the current namespace)
|
||||
telepresence intercept argocd-server --port 8080:http --env-file .envrc.remote # Starts the interception
|
||||
```
|
||||
* `--port` forwards traffic of remote ports 8080 and 8083 to the same ports locally
|
||||
* `--port` forwards traffic of remote port http to 8080 locally (use `--port 8080:https` if argocd-server terminates TLS)
|
||||
* `--env-file` writes all the environment variables of the remote pod into a local file, the variables are also set on the subprocess of the `--run` command
|
||||
* `--namespace` specifies that the `argocd-server` is located in the `argocd` namespace
|
||||
|
||||
With this, any traffic that hits your argocd-server service in the cluster (e.g. through a LB / ingress) will be forwarded to your laptop on port 8080. So that you can now start argocd-server locally to debug or test new code. If you launch argocd-server using the environment variables in `.envrc.remote`, it is able to fetch all the configmaps, secrets and so on from the cluster and transparently connect to the other microservices so that no further configuration should be necessary, and it behaves exactly the same as in the cluster.
|
||||
|
||||
List current status of Telepresence using:
|
||||
```shell
|
||||
@@ -63,11 +65,11 @@ Once a connection is established, use your favorite tools to start the server lo
|
||||
* Run `./dist/argocd-server`
|
||||
|
||||
### VSCode
|
||||
In VSCode use the integrated terminal to run the Telepresence command to connect. Then, to run argocd-server service use the following configuration.
|
||||
Update the configuration file to point to kubeconfig file: `KUBECONFIG=` (required)
|
||||
In VSCode use the following launch configuration to run argocd-server:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Launch",
|
||||
"name": "Launch argocd-server",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
@@ -82,3 +84,4 @@ Update the configuration file to point to kubeconfig file: `KUBECONFIG=` (requir
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -1,49 +1,78 @@
|
||||
# Release Process And Cadence
|
||||
|
||||
Argo CD is being developed using the following process:
|
||||
## Release Cycle
|
||||
|
||||
* Maintainers commit to work on set of features and enhancements and create GitHub milestone to track the work.
|
||||
* We are trying to avoid delaying release and prefer moving the feature into the next release if we cannot complete it on time.
|
||||
* The new release is published every **3 months**.
|
||||
* Critical bug-fixes are cherry-picked into the release branch and delivered using patch releases as frequently as needed.
|
||||
### Schedule
|
||||
|
||||
## Release Planning
|
||||
These are the upcoming release dates:
|
||||
|
||||
We are using GitHub milestones to perform release planning and tracking. Each release milestone includes two type of issues:
|
||||
| Release | Release Planning Meeting | Release Candidate 1   | General Availability | Release Champion                                       | Checklist                                                      |
|---------|--------------------------|-----------------------|----------------------|--------------------------------------------------------|----------------------------------------------------------------|
| v2.6    | Monday, Dec. 12, 2022    | Monday, Dec. 19, 2022 | Monday, Feb. 6, 2023 | [William Tam](https://github.com/wtam2018)             | [checklist](https://github.com/argoproj/argo-cd/issues/11563)  |
| v2.7    | Monday, Mar. 6, 2023     | Monday, Mar. 20, 2023 | Monday, May 1, 2023  | [Pavel Kostohrys](https://github.com/pasha-codefresh)  |                                                                |
| v2.8    | Monday, Jun. 5, 2023     | Monday, Jun. 19, 2023 | Monday, Aug. 7, 2023 | [Keith Chong](https://github.com/keithchong)           |                                                                |
| v2.9    | Monday, Sep. 4, 2023     | Monday, Sep. 18, 2023 | Monday, Nov. 6, 2023 |                                                        |                                                                |
|
||||
|
||||
* Issues that maintainers committed to working on. Maintainers decide which features they are committing to work on during the next release based on
|
||||
their availability. Typically issues added offline by each maintainer and finalized during the contributors' meeting. Each such issue should be
|
||||
assigned to maintainer who plans to implement and test it.
|
||||
* Nice to have improvements contributed by community contributors. Nice to have issues are typically not critical, smallish enhancements that could
|
||||
be contributed by community contributors. Maintainers are not committing to implement them but committing to review PR from the community.
|
||||
Actual release dates might differ from the plan by a few days.
|
||||
|
||||
The milestone should have a clear description of the most important features as well as the expected end date. This should provide clarity to end-users
|
||||
about what to expect from the next release and when.
|
||||
### Release Process
|
||||
|
||||
In addition to the next milestone, we need to maintain a draft of the upcoming release milestone.
|
||||
#### Minor Releases (e.g. 2.x.0)
|
||||
|
||||
## Community Contributions
|
||||
A minor Argo CD release occurs four times a year, once every three months. Each General Availability (GA) release is
|
||||
preceded by several Release Candidates (RCs). The first RC is released three weeks before the scheduled GA date. This
|
||||
effectively means that there is a three-week feature freeze.
|
||||
|
||||
We receive a lot of contributions from our awesome community, and we're very grateful for that fact. However, reviewing and testing PRs is a lot of (unplanned) work and therefore, we cannot guarantee that contributions (especially large or complex ones) made by the community receive a timely review within a release's time frame. Maintainers may decide on their own to put work on a PR together with the contributor and in this case, the maintainer will self-assigned the PR and thereby committing to review, eventually merge and later test it on the release scope.
|
||||
These are the approximate release dates:
|
||||
|
||||
## Release Testing
|
||||
* The first Monday of February
|
||||
* The first Monday of May
|
||||
* The first Monday of August
|
||||
* The first Monday of November
|
||||
|
||||
We need to make sure that each change, both from maintainers and community contributors, is tested well and have someone who is going to fix last-minute
|
||||
bugs. In order to ensure it, each merged pull request must have an assigned maintainer before it gets merged. The assigned maintainer will be working on
|
||||
testing the introduced changes and fixing of any introduced bugs.
|
||||
Dates may be shifted slightly to accommodate holidays. Those shifts should be minimal.
|
||||
|
||||
We have a code freeze period two weeks before the release until the release branch is created. During code freeze no feature PR should be merged and it is ok
|
||||
to merge bug fixes.
|
||||
#### Patch Releases (e.g. 2.5.x)
|
||||
|
||||
Maintainers assigned to a PR that's been merged should drive testing and work on fixing last-minute issues. For tracking purposes after verifying PR the assigned
|
||||
the maintainer should label it with a `verified` label.
|
||||
Argo CD patch releases occur on an as-needed basis. Only the three most recent minor versions are eligible for patch
|
||||
releases. Versions older than the three most recent minor versions are considered EOL and will not receive bug fixes or
|
||||
security updates.
|
||||
|
||||
## Releasing
|
||||
#### Minor Release Planning Meeting
|
||||
|
||||
The releasing procedure is described in [releasing](./releasing.md) document. Before closing the release milestone following should be verified:
|
||||
Roughly two weeks before the RC date, there will be a meeting to discuss which features are planned for the RC. This meeting is
|
||||
for contributors to advocate for certain features. Features which have at least one approver (besides the contributor)
|
||||
who can assure they will review/merge by the RC date will be included in the release milestone. All other features will
|
||||
be dropped from the milestone (and potentially shifted to the next one).
|
||||
|
||||
- [ ] All merged PRs and verified (verify and remove `needs-verification` label):
|
||||
- [ ] Triage issues reported by `yarn audit` and ensure there are no exploitable security issues.
|
||||
- [ ] Roadmap is updated based one current release changes
|
||||
- [ ] Next release milestone is created
|
||||
- [ ] Upcoming release milestone is updated
|
||||
Since not everyone will be able to attend the meeting, there will be a meeting doc. Contributors can add their feature
|
||||
to a table, and Approvers can add their name to the table. Features with a corresponding approver will remain in the
|
||||
release milestone.
|
||||
|
||||
#### Release Champion
|
||||
|
||||
To help manage all the steps involved in a release, we will have a Release Champion. The Release Champion will be
|
||||
responsible for a checklist of items for their release. The checklist is an issue template in the Argo CD repository.
|
||||
|
||||
The Release Champion can be anyone in the Argo CD community. Some tasks (like cherry-picking bug fixes and cutting
|
||||
releases) require [Approver](https://github.com/argoproj/argoproj/blob/master/community/membership.md#community-membership)
|
||||
membership. The Release Champion can delegate tasks when necessary and will be responsible for coordinating with the
|
||||
Approver.
|
||||
|
||||
### Feature Acceptance Criteria
|
||||
|
||||
To be eligible for inclusion in a minor release, a new feature must meet the following criteria before the release’s RC
|
||||
date.
|
||||
|
||||
If it is a large feature that involves significant design decisions, that feature must be described in a Proposal, and
|
||||
that Proposal must be reviewed and merged.
|
||||
|
||||
The feature PR must include:
|
||||
|
||||
* Tests (passing)
|
||||
* Documentation
|
||||
* If necessary, a note in the Upgrading docs for the planned minor release
|
||||
* The PR must be reviewed, approved, and merged by an Approver.
|
||||
|
||||
If these criteria are not met by the RC date, the feature will be ineligible for inclusion in the RC series or GA for
|
||||
that minor release. It will have to wait for the next minor release.
|
||||
|
||||
docs/faq.md (44 changed lines)
@@ -122,9 +122,9 @@ To terminate the sync, click on the "synchronisation" then "terminate":
|
||||
|
||||
 
|
||||
|
||||
## Why Is My App Out Of Sync Even After Syncing?
|
||||
## Why Is My App `Out Of Sync` Even After Syncing?
|
||||
|
||||
Is some cases, the tool you use may conflict with Argo CD by adding the `app.kubernetes.io/instance` label. E.g. using
|
||||
In some cases, the tool you use may conflict with Argo CD by adding the `app.kubernetes.io/instance` label. E.g. using
|
||||
Kustomize common labels feature.
|
||||
|
||||
Argo CD automatically sets the `app.kubernetes.io/instance` label and uses it to determine which resources form the app.
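For example, a Kustomize overlay along these lines (hypothetical application name) would stamp that same label onto every rendered resource and fight with the label Argo CD sets for tracking:

```yaml
# kustomization.yaml (illustrative)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
commonLabels:
  # clashes with the label Argo CD uses to determine which resources form the app
  app.kubernetes.io/instance: my-app
```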
|
||||
@@ -142,7 +142,7 @@ The default polling interval is 3 minutes (180 seconds).
|
||||
You can change the setting by updating the `timeout.reconciliation` value in the [argocd-cm](https://github.com/argoproj/argo-cd/blob/2d6ce088acd4fb29271ffb6f6023dbb27594d59b/docs/operator-manual/argocd-cm.yaml#L279-L282) config map. If there are any Git changes, ArgoCD will only update applications with the [auto-sync setting](user-guide/auto_sync.md) enabled. If you set it to `0` then Argo CD will stop polling Git repositories automatically and you can only use alternative methods such as [webhooks](operator-manual/webhook.md) and/or manual syncs for deploying applications.
|
||||
|
||||
|
||||
## Why Are My Resource Limits Out Of Sync?
|
||||
## Why Are My Resource Limits `Out Of Sync`?
|
||||
|
||||
Kubernetes normalizes your resource limits when they are applied, and Argo CD then compares the version in
your generated manifests to the normalized one in Kubernetes - they won't match.
|
||||
@@ -157,7 +157,7 @@ E.g.
|
||||
To fix this use diffing
|
||||
customizations [settings](./user-guide/diffing.md#known-kubernetes-types-in-crds-resource-limits-volume-mounts-etc).
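If you only need to silence a normalization-only difference for a specific field, an Application-level `ignoreDifferences` entry is one option. A minimal sketch (the diffing customizations page linked above describes the full set of options, including system-level known types):

```yaml
spec:
  ignoreDifferences:
  - group: apps
    kind: Deployment
    jsonPointers:
    # ignore the container resources block when diffing (path is illustrative)
    - /spec/template/spec/containers/0/resources
```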
|
||||
|
||||
## How Do I Fix "invalid cookie, longer than max length 4093"?
|
||||
## How Do I Fix `invalid cookie, longer than max length 4093`?
|
||||
|
||||
Argo CD uses a JWT as the auth token. You likely are part of many groups and have gone over the 4KB limit which is set
|
||||
for cookies. You can get the list of groups by opening "developer tools -> network"
|
||||
@@ -224,4 +224,38 @@ resource.customizations.health.bitnami.com_SealedSecret: |
|
||||
hs.status = "Healthy"
|
||||
hs.message = "Controller doesn't report resource status"
|
||||
return hs
|
||||
```
|
||||
```
|
||||
|
||||
## How do I fix `The order in patch list … doesn't match $setElementOrder list: …`?
|
||||
|
||||
An application may trigger a sync error labeled a `ComparisonError` with a message like:
|
||||
|
||||
> The order in patch list: [map[name:**KEY_BC** value:150] map[name:**KEY_BC** value:500] map[name:**KEY_BD** value:250] map[name:**KEY_BD** value:500] map[name:KEY_BI value:something]] doesn't match $setElementOrder list: [map[name:KEY_AA] map[name:KEY_AB] map[name:KEY_AC] map[name:KEY_AD] map[name:KEY_AE] map[name:KEY_AF] map[name:KEY_AG] map[name:KEY_AH] map[name:KEY_AI] map[name:KEY_AJ] map[name:KEY_AK] map[name:KEY_AL] map[name:KEY_AM] map[name:KEY_AN] map[name:KEY_AO] map[name:KEY_AP] map[name:KEY_AQ] map[name:KEY_AR] map[name:KEY_AS] map[name:KEY_AT] map[name:KEY_AU] map[name:KEY_AV] map[name:KEY_AW] map[name:KEY_AX] map[name:KEY_AY] map[name:KEY_AZ] map[name:KEY_BA] map[name:KEY_BB] map[name:**KEY_BC**] map[name:**KEY_BD**] map[name:KEY_BE] map[name:KEY_BF] map[name:KEY_BG] map[name:KEY_BH] map[name:KEY_BI] map[name:**KEY_BC**] map[name:**KEY_BD**]]
|
||||
|
||||
|
||||
There are two parts to the message:
|
||||
|
||||
1. `The order in patch list: [`
|
||||
|
||||
This identifies values for items, especially items that appear multiple times:
|
||||
|
||||
> map[name:**KEY_BC** value:150] map[name:**KEY_BC** value:500] map[name:**KEY_BD** value:250] map[name:**KEY_BD** value:500] map[name:KEY_BI value:something]
|
||||
|
||||
You'll want to identify the keys that are duplicated -- you can focus on this first part, as each duplicated key will appear here once for each of its values, together with that value. The rest of this part of the message is really just the closing

`]`
|
||||
|
||||
2. `doesn't match $setElementOrder list: [`
|
||||
|
||||
This includes all of the keys. It's included for debugging purposes -- you don't need to pay much attention to it. It will give you a hint about the precise location in the list for the duplicated keys:
|
||||
|
||||
> map[name:KEY_AA] map[name:KEY_AB] map[name:KEY_AC] map[name:KEY_AD] map[name:KEY_AE] map[name:KEY_AF] map[name:KEY_AG] map[name:KEY_AH] map[name:KEY_AI] map[name:KEY_AJ] map[name:KEY_AK] map[name:KEY_AL] map[name:KEY_AM] map[name:KEY_AN] map[name:KEY_AO] map[name:KEY_AP] map[name:KEY_AQ] map[name:KEY_AR] map[name:KEY_AS] map[name:KEY_AT] map[name:KEY_AU] map[name:KEY_AV] map[name:KEY_AW] map[name:KEY_AX] map[name:KEY_AY] map[name:KEY_AZ] map[name:KEY_BA] map[name:KEY_BB] map[name:**KEY_BC**] map[name:**KEY_BD**] map[name:KEY_BE] map[name:KEY_BF] map[name:KEY_BG] map[name:KEY_BH] map[name:KEY_BI] map[name:**KEY_BC**] map[name:**KEY_BD**]
|
||||
|
||||
`]`
|
||||
|
||||
In this case, the duplicated keys have been **emphasized** to help you identify the problematic keys. Many editors have the ability to highlight all instances of a string, using such an editor can help with such problems.
|
||||
|
||||
The most common instance of this error is with `env:` fields for `containers`.
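For instance, a pod template along these lines (hypothetical keys, mirroring the error quoted above) reproduces the problem, because `KEY_BC` and `KEY_BD` each appear twice in the same `env` list:

```yaml
containers:
- name: app
  env:
  - name: KEY_BC
    value: "150"
  - name: KEY_BD
    value: "250"
  - name: KEY_BI
    value: something
  # duplicates further down the (possibly generated) list:
  - name: KEY_BC
    value: "500"
  - name: KEY_BD
    value: "500"
```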
|
||||
|
||||
!!! note "Dynamic applications"
|
||||
It's possible that your application is being generated by a tool in which case the duplication might not be evident within the scope of a single file. If you have trouble debugging this problem, consider filing a ticket to the owner of the generator tool asking them to improve its validation and error reporting.
|
||||
|
||||
@@ -78,10 +78,10 @@ The API server can then be accessed using https://localhost:8080
|
||||
The initial password for the `admin` account is auto-generated and stored as
|
||||
clear text in the field `password` in a secret named `argocd-initial-admin-secret`
|
||||
in your Argo CD installation namespace. You can simply retrieve this password
|
||||
using `kubectl`:
|
||||
using the `argocd` CLI:
|
||||
|
||||
```bash
|
||||
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo
|
||||
argocd admin initial-password -n argocd
|
||||
```
|
||||
|
||||
!!! warning
|
||||
|
||||
docs/operator-manual/app-any-namespace.md (new file, 220 lines)
@@ -0,0 +1,220 @@
|
||||
# Applications in any namespace
|
||||
|
||||
**Current feature state**: Beta
|
||||
|
||||
!!! warning
|
||||
Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues.
|
||||
|
||||
## Introduction
|
||||
|
||||
As of version 2.5, Argo CD supports managing `Application` resources in namespaces other than the control plane's namespace (which is usually `argocd`), but this feature has to be explicitly enabled and configured appropriately.
|
||||
|
||||
Argo CD administrators can define a certain set of namespaces where `Application` resources may be created, updated and reconciled in. However, applications in these additional namespaces will only be allowed to use certain `AppProjects`, as configured by the Argo CD administrators. This allows ordinary Argo CD users (e.g. application teams) to use patterns like declarative management of `Application` resources, implementing app-of-apps and others without the risk of a privilege escalation through usage of other `AppProjects` that would exceed the permissions granted to the application teams.
|
||||
|
||||
Some manual steps will need to be performed by the Argo CD administrator in order to enable this feature.
|
||||
|
||||
!!! note
|
||||
This feature is considered beta as of now. Some of the implementation details may change over the course of time until it is promoted to a stable status. We will be happy if early adopters use this feature and provide us with bug reports and feedback.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Cluster-scoped Argo CD installation
|
||||
|
||||
This feature can only be enabled and used when your Argo CD is installed as a cluster-wide instance, so it has permissions to list and manipulate resources on a cluster scope. It will *not* work with an Argo CD installed in namespace-scoped mode.
|
||||
|
||||
### Switch resource tracking method
|
||||
|
||||
Also, while technically not necessary, it is strongly suggested that you switch the application tracking method from the default `label` setting to either `annotation` or `annotation+label`. The reasoning for this is that application names will be a composite of the namespace's name and the name of the `Application`, which can easily exceed the 63-character length limit imposed on label values. Annotations have a notably greater length limit.
|
||||
|
||||
To enable annotation based resource tracking, refer to the documentation about [resource tracking methods](../../user-guide/resource_tracking/)
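For illustration, switching to annotation-based tracking comes down to a single setting in the `argocd-cm` ConfigMap (a sketch; see the linked page for details and for the `annotation+label` variant):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
  labels:
    app.kubernetes.io/part-of: argocd
data:
  application.resourceTrackingMethod: annotation
```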
|
||||
|
||||
## Implementation details
|
||||
|
||||
### Overview
|
||||
|
||||
In order for an application to be managed and reconciled outside the Argo CD's control plane namespace, two prerequisites must match:
|
||||
|
||||
1. The `Application`'s namespace must be explicitly enabled using the `--application-namespaces` parameter for the `argocd-application-controller` and `argocd-server` workloads. This parameter controls the list of namespaces that Argo CD will be allowed to source `Application` resources from globally. Any namespace not configured here cannot be used from any `AppProject`.
|
||||
1. The `AppProject` referenced by the `.spec.project` field of the `Application` must have the namespace listed in its `.spec.sourceNamespaces` field. This setting will determine whether an `Application` may use a certain `AppProject`. If an `Application` specifies an `AppProject` that is not allowed, Argo CD refuses to process this `Application`. As stated above, any namespace configured in the `.spec.sourceNamespaces` field must also be enabled globally.
|
||||
|
||||
`Applications` in different namespaces can be created and managed just like any other `Application` in the `argocd` namespace previously, either declaratively or through the Argo CD API (e.g. using the CLI, the web UI, the REST API, etc).
|
||||
|
||||
### Reconfigure Argo CD to allow certain namespaces
|
||||
|
||||
#### Change workload startup parameters
|
||||
|
||||
In order to enable this feature, the Argo CD administrator must reconfigure the `argocd-server` and `argocd-application-controller` workloads to add the `--application-namespaces` parameter to the container's startup command.
|
||||
|
||||
The `--application-namespaces` parameter takes a comma-separated list of namespaces where `Applications` are to be allowed in. Each entry of the list supports shell-style wildcards such as `*`, so for example the entry `app-team-*` would match `app-team-one` and `app-team-two`. To enable all namespaces on the cluster where Argo CD is running on, you can just specify `*`, i.e. `--application-namespaces=*`.
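As a rough sketch of where the flag ends up (container names and surrounding fields are abbreviated and may differ in your installation's manifests), it is appended to the container command of both workloads:

```yaml
# argocd-application-controller StatefulSet (and analogously the argocd-server Deployment)
containers:
- name: argocd-application-controller
  command:
  - argocd-application-controller
  - --application-namespaces=app-team-*
```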
|
||||
|
||||
The startup parameters for both the `argocd-server` and the `argocd-application-controller` can also be conveniently set up and kept in sync by specifying the `application.namespaces` settings in the `argocd-cmd-params-cm` ConfigMap _instead_ of changing the manifests for the respective workloads. For example:
|
||||
|
||||
```yaml
|
||||
data:
|
||||
application.namespaces: app-team-one, app-team-two
|
||||
```
|
||||
|
||||
would allow the `app-team-one` and `app-team-two` namespaces for managing `Application` resources. After a change to the `argocd-cmd-params-cm` ConfigMap, the appropriate workloads need to be restarted:
|
||||
|
||||
```bash
|
||||
kubectl rollout restart -n argocd deployment argocd-server
|
||||
kubectl rollout restart -n argocd statefulset argocd-application-controller
|
||||
```
|
||||
|
||||
#### Adapt Kubernetes RBAC
|
||||
|
||||
We decided to not extend the Kubernetes RBAC for the `argocd-server` workload by default for the time being. If you want `Applications` in other namespaces to be managed by the Argo CD API (i.e. the CLI and UI), you need to extend the Kubernetes permissions for the `argocd-server` ServiceAccount.
|
||||
|
||||
We supply a `ClusterRole` and `ClusterRoleBinding` suitable for this purpose in the `examples/k8s-rbac/argocd-server-applications` directory. For a default Argo CD installation (i.e. installed to the `argocd` namespace), you can just apply them as-is:
|
||||
|
||||
```shell
|
||||
kubectl apply -f examples/k8s-rbac/argocd-server-applications/
|
||||
```
|
||||
|
||||
!!! note
|
||||
At some later point in time, we may make this cluster role part of the default installation manifests.
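For orientation, the resources in that directory have roughly the following shape (an abbreviated sketch with illustrative names; the manifests in the repository are authoritative and may contain additional rules):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: argocd-server-cluster-apps   # illustrative name
rules:
- apiGroups: ["argoproj.io"]
  resources: ["applications"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: argocd-server-cluster-apps
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argocd-server-cluster-apps
subjects:
- kind: ServiceAccount
  name: argocd-server
  namespace: argocd
```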
|
||||
|
||||
### Allowing additional namespaces in an AppProject
|
||||
|
||||
Any user with Kubernetes access to the Argo CD control plane's namespace (`argocd`), especially those with permissions to create or update `Applications` in a declarative way, is to be considered an Argo CD admin.
|
||||
|
||||
This prevented unprivileged Argo CD users from declaratively creating or managing `Applications` in the past. Those users were constrained to using the API instead, subject to Argo CD RBAC which ensures only `Applications` in allowed `AppProjects` were created.
|
||||
|
||||
For an `Application` to be created outside the `argocd` namespace, the `AppProject` referred to in the `Application`'s `.spec.project` field must include the `Application`'s namespace in its `.spec.sourceNamespaces` field.
|
||||
|
||||
For example, consider the two following (incomplete) `AppProject` specs:
|
||||
|
||||
```yaml
|
||||
kind: AppProject
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
metadata:
|
||||
name: project-one
|
||||
namespace: argocd
|
||||
spec:
|
||||
sourceNamespaces:
|
||||
- namespace-one
|
||||
```
|
||||
|
||||
and
|
||||
|
||||
```yaml
|
||||
kind: AppProject
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
metadata:
|
||||
name: project-two
|
||||
namespace: argocd
|
||||
spec:
|
||||
sourceNamespaces:
|
||||
- namespace-two
|
||||
```
|
||||
|
||||
In order for an Application to set `.spec.project` to `project-one`, it would have to be created in either namespace `namespace-one` or `argocd`. Likewise, in order for an Application to set `.spec.project` to `project-two`, it would have to be created in either namespace `namespace-two` or `argocd`.
|
||||
|
||||
If an Application in `namespace-two` would set their `.spec.project` to `project-one` or an Application in `namespace-one` would set their `.spec.project` to `project-two`, Argo CD would consider this as a permission violation and refuse to reconcile the Application.
|
||||
|
||||
Also, the Argo CD API will enforce these constraints, regardless of the Argo CD RBAC permissions.
|
||||
|
||||
The `.spec.sourceNamespaces` field of the `AppProject` is a list that can contain an arbitrary number of namespaces, and each entry supports shell-style wildcards, so that you can allow namespaces with patterns like `team-one-*`.
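For example, the following (incomplete) AppProject would accept Applications from any namespace matching `team-one-*`:

```yaml
kind: AppProject
apiVersion: argoproj.io/v1alpha1
metadata:
  name: team-one
  namespace: argocd
spec:
  sourceNamespaces:
  - team-one-*
```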
|
||||
|
||||
!!! warning
|
||||
Do not add user controlled namespaces in the `.spec.sourceNamespaces` field of any privileged AppProject like the `default` project. Always make sure that the AppProject follows the principle of granting least required privileges. Never grant access to the `argocd` namespace within the AppProject.
|
||||
|
||||
!!! note
|
||||
For backwards compatibility, Applications in the Argo CD control plane's namespace (`argocd`) are allowed to set their `.spec.project` field to reference any AppProject, regardless of the restrictions placed by the AppProject's `.spec.sourceNamespaces` field.
|
||||
|
||||
### Application names
|
||||
|
||||
For the CLI and UI, applications are now referred to and displayed as in the format `<namespace>/<name>`.
|
||||
|
||||
For backwards compatibility, if the namespace of the Application is the control plane's namespace (i.e. `argocd`), the `<namespace>` can be omitted from the application name when referring to it. For example, the application names `argocd/someapp` and `someapp` are semantically the same and refer to the same application in the CLI and the UI.
|
||||
|
||||
### Application RBAC
|
||||
|
||||
The RBAC syntax for Application objects has been changed from `<project>/<application>` to `<project>/<namespace>/<application>` to accommodate the need to restrict access based on the source namespace of the Application to be managed.
|
||||
|
||||
For backwards compatibility, Applications in the `argocd` namespace can still be referred to as `<project>/<application>` in the RBAC policy rules.
|
||||
|
||||
Wildcards do not make any distinction between project and application namespaces yet. For example, the following RBAC rule would match any application belonging to project `foo`, regardless of the namespace it is created in:
|
||||
|
||||
```
|
||||
p, somerole, applications, get, foo/*, allow
|
||||
```
|
||||
|
||||
If you want to restrict access to be granted only to `Applications` in project `foo` within namespace `bar`, the rule would need to be adapted as follows:
|
||||
|
||||
```
|
||||
p, somerole, applications, get, foo/bar/*, allow
|
||||
```
|
||||
|
||||
## Managing applications in other namespaces
|
||||
|
||||
### Declaratively
|
||||
|
||||
For declarative management of Applications, just create the Application from a YAML or JSON manifest in the desired namespace. Make sure that the `.spec.project` field refers to an AppProject that allows this namespace. For example, the following (incomplete) Application manifest creates an Application in the namespace `some-namespace`:
|
||||
|
||||
```yaml
|
||||
kind: Application
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
metadata:
|
||||
name: some-app
|
||||
namespace: some-namespace
|
||||
spec:
|
||||
project: some-project
|
||||
# ...
|
||||
```
|
||||
|
||||
The project `some-project` will then need to specify `some-namespace` in the list of allowed source namespaces, e.g.
|
||||
|
||||
```yaml
|
||||
kind: AppProject
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
metadata:
|
||||
name: some-project
|
||||
namespace: argocd
|
||||
spec:
|
||||
sourceNamespaces:
|
||||
- some-namespace
|
||||
```
|
||||
|
||||
### Using the CLI
|
||||
|
||||
You can use all existing Argo CD CLI commands for managing applications in other namespaces, exactly as you would use the CLI to manage applications in the control plane's namespace.
|
||||
|
||||
For example, to retrieve the `Application` named `foo` in the namespace `bar`, you can use the following CLI command:
|
||||
|
||||
```shell
|
||||
argocd app get foo/bar
|
||||
```
|
||||
|
||||
Likewise, to manage this application, keep referring to it as `foo/bar`:
|
||||
|
||||
```bash
|
||||
# Create an application
|
||||
argocd app create foo/bar ...
|
||||
# Sync the application
|
||||
argocd app sync foo/bar
|
||||
# Delete the application
|
||||
argocd app delete foo/bar
|
||||
# Retrieve application's manifest
|
||||
argocd app manifests foo/bar
|
||||
```
|
||||
|
||||
As stated previously, for applications in the Argo CD's control plane namespace, you can omit the namespace from the application name.
|
||||
|
||||
### Using the UI
|
||||
|
||||
Similar to the CLI, you can refer to the application in the UI as `foo/bar`.
|
||||
|
||||
For example, to create an application named `bar` in the namespace `foo` in the web UI, set the application name in the creation dialogue's _Application Name_ field to `foo/bar`. If the namespace is omitted, the control plane's namespace will be used.
|
||||
|
||||
### Using the REST API
|
||||
|
||||
If you are using the REST API, the namespace for the `Application` cannot be specified as part of the application name, and resources need to be specified using the optional `appNamespace` query parameter. For example, to work with the `Application` resource named `foo` in the namespace `bar`, the request would look as follows:
|
||||
|
||||
```bash
|
||||
GET /api/v1/applications/foo?appNamespace=bar
|
||||
```
|
||||
|
||||
For other operations such as `POST` and `PUT`, the `appNamespace` parameter must be part of the request's payload.
|
||||
|
||||
For `Application` resources in the control plane namespace, this parameter can be omitted.
|
||||
@@ -45,6 +45,9 @@ spec:
|
||||
valueFiles:
|
||||
- values-prod.yaml
|
||||
|
||||
# Ignore locally missing valueFiles when installing Helm chart. Defaults to false
|
||||
ignoreMissingValueFiles: false
|
||||
|
||||
# Values file as block file
|
||||
values: |
|
||||
ingress:
|
||||
@@ -61,6 +64,9 @@ spec:
|
||||
hosts:
|
||||
- mydomain.example.com
|
||||
|
||||
# Skip custom resource definition installation if chart contains custom resource definitions. Defaults to false
|
||||
skipCrds: false
|
||||
|
||||
# Optional Helm version to template with. If omitted it will fall back to look at the 'apiVersion' in Chart.yaml
|
||||
# and decide which Helm binary to use automatically. This field can be either 'v2' or 'v3'.
|
||||
version: v2
|
||||
@@ -108,18 +114,36 @@ spec:
|
||||
|
||||
# plugin specific config
|
||||
plugin:
|
||||
# Only set the plugin name if the plugin is defined in argocd-cm.
|
||||
# If the plugin is defined as a sidecar, omit the name. The plugin will be automatically matched with the
|
||||
# If the plugin is defined as a sidecar and name is not passed, the plugin will be automatically matched with the
|
||||
# Application according to the plugin's discovery rules.
|
||||
name: mypluginname
|
||||
# environment variables passed to the plugin
|
||||
env:
|
||||
- name: FOO
|
||||
value: bar
|
||||
# Plugin parameters are new in v2.5.
|
||||
parameters:
|
||||
- name: string-param
|
||||
string: example-string
|
||||
- name: array-param
|
||||
array: [item1, item2]
|
||||
- name: map-param
|
||||
map:
|
||||
param-name: param-value
|
||||
|
||||
# Sources field specifies the list of sources for the application
|
||||
sources:
|
||||
- repoURL: https://github.com/argoproj/argocd-example-apps.git # Can point to either a Helm chart repo or a git repo.
|
||||
targetRevision: HEAD # For Helm, this refers to the chart version.
|
||||
path: guestbook # This has no meaning for Helm charts pulled directly from a Helm repo instead of git.
|
||||
ref: my-repo # For Helm, acts as a reference to this source for fetching values files from this source. Has no meaning when under `source` field
|
||||
|
||||
# Destination cluster and namespace to deploy the application
|
||||
destination:
|
||||
# cluster API URL
|
||||
server: https://kubernetes.default.svc
|
||||
# or cluster name
|
||||
# name: in-cluster
|
||||
# The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace
|
||||
namespace: guestbook
|
||||
|
||||
@@ -134,6 +158,15 @@ spec:
|
||||
- CreateNamespace=true # Namespace Auto-Creation ensures that namespace specified as the application destination exists in the destination cluster.
|
||||
- PrunePropagationPolicy=foreground # Supported policies are background, foreground and orphan.
|
||||
- PruneLast=true # Allow the ability for resource pruning to happen as a final, implicit wave of a sync operation
|
||||
managedNamespaceMetadata: # Sets the metadata for the application namespace. Only valid if CreateNamespace=true (see above), otherwise it's a no-op.
|
||||
labels: # The labels to set on the application namespace
|
||||
any: label
|
||||
you: like
|
||||
annotations: # The annotations to set on the application namespace
|
||||
the: same
|
||||
applies: for
|
||||
annotations: on-the-namespace
|
||||
|
||||
# The retry feature is available since v1.7
|
||||
retry:
|
||||
limit: 5 # number of failed sync attempt retries; unlimited number of attempts if less than 0
|
||||
|
||||
@@ -18,7 +18,7 @@ See 'How to modify ApplicationSet container parameters' below for detailed steps
|
||||
|
||||
The ApplicationSet controller supports a parameter `--policy`, which is specified on launch (within the controller Deployment container), and which restricts what types of modifications will be made to managed Argo CD `Application` resources.
|
||||
|
||||
The `--policy` parameter takes three values: `sync`, `create-only`, and `create-update`. (`sync` is the default, which is used if the `--policy` parameter is not specified; the other policies are described below).
|
||||
The `--policy` parameter takes one of the following valid values: `sync`, `create-only`, `create-update`, and `create-delete`. (`sync` is the default, which is used if the `--policy` parameter is not specified; the other policies are described below).
|
||||
|
||||
To allow the ApplicationSet controller to *create* `Application` resources, but prevent any further modification, such as deletion, or modification of Application fields, add this parameter in the ApplicationSet controller:
|
||||
```
|
||||
@@ -34,6 +34,13 @@ To allow the ApplicationSet controller to create or modify `Application` resourc
|
||||
|
||||
This may be useful to users looking for additional protection against deletion of the Applications generated by the controller.
|
||||
|
||||
### Policy - `create-delete`: Prevent ApplicationSet controller from updating Applications
|
||||
|
||||
To allow the ApplicationSet controller to create or delete `Application` resources, but prevent Applications from being updated, add the following parameter to the ApplicationSet controller `Deployment`:
|
||||
```
|
||||
--policy create-delete
|
||||
```
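Wherever the policy is set, it ultimately becomes an argument of the controller's container. A trimmed sketch of the relevant part of the `argocd-applicationset-controller` Deployment (surrounding fields omitted; see 'How to modify ApplicationSet container parameters' below for the exact procedure):

```yaml
spec:
  template:
    spec:
      containers:
      - name: argocd-applicationset-controller
        command:
        - argocd-applicationset-controller
        - --policy=create-delete
```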
|
||||
|
||||
### Prevent an `Application`'s child resources from being deleted, when the parent Application is deleted
|
||||
|
||||
By default, when an `Application` resource is deleted by the ApplicationSet controller, all of the child resources of the Application will be deleted as well (such as, all of the Application's `Deployments`, `Services`, etc).
|
||||
|
||||
@@ -70,6 +70,8 @@ The generator parameters are:
|
||||
|
||||
**Note**: The right-most path name always becomes `{{path.basename}}`. For example, for `- path: /one/two/three/four`, `{{path.basename}}` is `four`.
|
||||
|
||||
**Note**: If the `pathParamPrefix` option is specified, all `path`-related parameter names above will be prefixed with the specified value and a dot separator. E.g., if `pathParamPrefix` is `myRepo`, then the generated parameter name would be `myRepo.path` instead of `path`. Using this option is necessary in a Matrix generator where both child generators are Git generators (to avoid conflicts when merging the child generators’ items).
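As a small, hypothetical sketch of the option (repository URL and paths are placeholders), `pathParamPrefix` sits at the generator level, next to `directories` or `files`:

```yaml
- git:
    repoURL: https://github.com/some-org/some-repo.git
    revision: HEAD
    directories:
    - path: apps/*
    pathParamPrefix: myRepo
# parameters are then exposed as myRepo.path, myRepo.path.basename, and so on
```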
|
||||
|
||||
Whenever a new Helm chart/Kustomize YAML/Application/plain subdirectory is added to the Git repository, the ApplicationSet controller will detect this change and automatically deploy the resulting manifests within new `Application` resources.
|
||||
|
||||
As with other generators, clusters *must* already be defined within Argo CD, in order to generate Applications for them.
|
||||
@@ -108,7 +110,7 @@ spec:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: '{{path.basename}}'
|
||||
```
|
||||
(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/examples/applicationset/git-generator-directory/excludes).*)
|
||||
(*The full example can be found [here](https://github.com/argoproj/argo-cd/tree/master/applicationset/examples/git-generator-directory/excludes).*)
|
||||
|
||||
This example excludes the `exclude-helm-guestbook` directory from the list of directories scanned for this `ApplicationSet` resource.
|
||||
|
||||
@@ -284,6 +286,7 @@ In addition to the flattened key/value pairs from the configuration file, the fo
|
||||
**Note**: The right-most *directory* name always becomes `{{path.basename}}`. For example, from `- path: /one/two/three/four/config.json`, `{{path.basename}}` will be `four`.
|
||||
The filename can always be accessed using `{{path.filename}}`.
|
||||
|
||||
**Note**: If the `pathParamPrefix` option is specified, all `path`-related parameter names above will be prefixed with the specified value and a dot separator. E.g., if `pathParamPrefix` is `myRepo`, then the generated parameter name would be `myRepo.path` instead of `path`. Using this option is necessary in a Matrix generator where both child generators are Git generators (to avoid conflicts when merging the child generators’ items).
|
||||
|
||||
## Webhook Configuration
|
||||
|
||||
|
||||
@@ -11,6 +11,8 @@ By combining both generators parameters, to produce every possible combination,
|
||||
|
||||
Any set of generators may be used, with the combined values of those generators inserted into the `template` parameters, as usual.
|
||||
|
||||
**Note**: If both child generators are Git generators, one or both of them must use the `pathParamPrefix` option to avoid conflicts when merging the child generators’ items.
|
||||
|
||||
## Example: Git Directory generator + Cluster generator
|
||||
|
||||
As an example, imagine that we have two clusters:
|
||||
@@ -166,6 +168,102 @@ In the 2nd child generator, the label selector with label `kubernetes.io/environ
|
||||
So in the above example, clusters with the label `kubernetes.io/environment: prod` will have only prod-specific configuration (i.e. `prod/config.json`) applied to them, whereas clusters
with the label `kubernetes.io/environment: dev` will have only dev-specific configuration (i.e. `dev/config.json`).
|
||||
|
||||
## Example: Two Git Generators Using `pathParamPrefix`
|
||||
|
||||
The matrix generator will fail if its children produce results containing identical keys with differing values.
|
||||
This poses a problem for matrix generators where both children are Git generators since they auto-populate `path`-related parameters in their outputs.
|
||||
To avoid this problem, specify a `pathParamPrefix` on one or both of the child generators to avoid conflicting parameter keys in the output.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: ApplicationSet
|
||||
metadata:
|
||||
name: two-gits-with-path-param-prefix
|
||||
spec:
|
||||
generators:
|
||||
- matrix:
|
||||
generators:
|
||||
# git file generator referencing files containing details about each
|
||||
# app to be deployed (e.g., `appName`).
|
||||
- git:
|
||||
repoURL: https://github.com/some-org/some-repo.git
|
||||
revision: HEAD
|
||||
files:
|
||||
- path: "apps/*.json"
|
||||
pathParamPrefix: app
|
||||
# git file generator referencing files containing details about
|
||||
# locations to which each app should deploy (e.g., `region` and
|
||||
# `clusterName`).
|
||||
- git:
|
||||
repoURL: https://github.com/some-org/some-repo.git
|
||||
revision: HEAD
|
||||
files:
|
||||
- path: "targets/{{appName}}/*.json"
|
||||
pathParamPrefix: target
|
||||
template: {} # ...
|
||||
```
|
||||
|
||||
Then, given the following file structure/content:
|
||||
|
||||
```
├── apps
│   ├── app-one.json
│   │     { "appName": "app-one" }
│   └── app-two.json
│         { "appName": "app-two" }
└── targets
    ├── app-one
    │   ├── east-cluster-one.json
    │   │     { "region": "east", "clusterName": "cluster-one" }
    │   └── east-cluster-two.json
    │         { "region": "east", "clusterName": "cluster-two" }
    └── app-two
        ├── east-cluster-one.json
        │     { "region": "east", "clusterName": "cluster-one" }
        └── west-cluster-three.json
              { "region": "west", "clusterName": "cluster-three" }
```

…the matrix generator above would yield the following results:

```yaml
- appName: app-one
  app.path: /apps
  app.path.filename: app-one.json
  # plus additional path-related parameters from the first child generator, all
  # prefixed with "app".
  region: east
  clusterName: cluster-one
  target.path: /targets/app-one
  target.path.filename: east-cluster-one.json
  # plus additional path-related parameters from the second child generator, all
  # prefixed with "target".

- appName: app-one
  app.path: /apps
  app.path.filename: app-one.json
  region: east
  clusterName: cluster-two
  target.path: /targets/app-one
  target.path.filename: east-cluster-two.json

- appName: app-two
  app.path: /apps
  app.path.filename: app-two.json
  region: east
  clusterName: cluster-one
  target.path: /targets/app-two
  target.path.filename: east-cluster-one.json

- appName: app-two
  app.path: /apps
  app.path.filename: app-two.json
  region: west
  clusterName: cluster-three
  target.path: /targets/app-two
  target.path.filename: west-cluster-three.json
```
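
To consume these merged parameters, the `template` (elided as `{}` above) could reference both the plain and the prefixed keys. A hedged sketch, with the manifest layout and destination convention assumed:

```yaml
# Sketch only: one Application per app/target combination produced above.
template:
  metadata:
    name: '{{appName}}-{{clusterName}}'
  spec:
    project: default
    source:
      repoURL: https://github.com/some-org/some-repo.git
      targetRevision: HEAD
      path: 'manifests/{{appName}}'     # assumed layout for the app manifests
    destination:
      name: '{{clusterName}}'           # assumes clusters are registered under this name
      namespace: '{{appName}}'
```
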
## Restrictions

1. The Matrix generator currently supports combining the outputs of only two child generators (e.g. it does not support generating combinations for three or more).

@@ -0,0 +1,43 @@

# Post Selector all generators

The Selector allows post-filtering of the generated values using the common Kubernetes label selector format. In the example below, the list generator produces a set of two applications, which are then filtered by key and value so that only the element whose `env` is `staging` is selected:

## Example: List generator + Post Selector

```yaml
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: guestbook
spec:
  generators:
    - list:
        elements:
          - cluster: engineering-dev
            url: https://kubernetes.default.svc
            env: staging
          - cluster: engineering-prod
            url: https://kubernetes.default.svc
            env: prod
      selector:
        matchLabels:
          env: staging
  template:
    metadata:
      name: '{{cluster}}-guestbook'
    spec:
      project: default
      source:
        repoURL: https://github.com/argoproj-labs/applicationset.git
        targetRevision: HEAD
        path: examples/list-generator/guestbook/{{cluster}}
      destination:
        server: '{{url}}'
        namespace: guestbook
```

The List generator + Post Selector generates a single set of parameters:

```yaml
- cluster: engineering-dev
  url: https://kubernetes.default.svc
  env: staging
```
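
Because `selector` follows the standard Kubernetes label selector structure, `matchExpressions` should also be usable; a hedged sketch (not part of the example above):

```yaml
# Sketch: keep every generated element whose env is anything other than prod.
selector:
  matchExpressions:
    - key: env
      operator: NotIn
      values:
        - prod
```
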
@@ -60,7 +60,7 @@ spec:

* `repo`: Required name of the GitHub repository.
* `api`: If using GitHub Enterprise, the URL to access it. (Optional)
* `tokenRef`: A `Secret` name and key containing the GitHub access token to use for requests. If not specified, anonymous requests are made, which have a lower rate limit and can only see public repositories. (Optional)
* `labels`: Filter the PRs to those containing **all** of the labels listed. (Optional)
* `appSecretName`: A `Secret` name containing a GitHub App secret in [repo-creds format][repo-creds].

[repo-creds]: ../declarative-setup.md#repository-credentials

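
Putting the options above together, a GitHub Pull Request generator might look roughly like this; the owner, repository, and secret names are placeholders, not values from this document:

```yaml
# Sketch only: field names follow the option list above; values are assumptions.
generators:
  - pullRequest:
      github:
        owner: example-org
        repo: example-repo
        # api: https://github.example.com/   # only needed for GitHub Enterprise
        tokenRef:
          secretName: github-token
          key: token
        labels:
          - preview                          # only PRs carrying all listed labels
      requeueAfterSeconds: 1800
```
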
@@ -274,6 +274,7 @@ spec:

* `branch_slug`: The branch name, cleaned to conform to the DNS label standard as defined in [RFC 1123](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names) and truncated to 50 characters to leave room for appending or suffixing it with up to 13 more characters.
* `head_sha`: This is the SHA of the head of the pull request.
* `head_short_sha`: This is the short SHA of the head of the pull request (8 characters long or the length of the head SHA if it's shorter).
* `labels`: The array of pull request labels. (Supported only for Go Template ApplicationSet manifests.)

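
These parameters are typically consumed in the `template`, for example to create one preview Application per open pull request. A hedged sketch: `number` and `branch_slug` come from the same generator, while the repository URL, manifest path, and image-tag convention are assumptions:

```yaml
# Sketch only: a preview app per PR, named with the slugged branch and PR number.
template:
  metadata:
    name: 'preview-{{branch_slug}}-{{number}}'
  spec:
    project: default
    source:
      repoURL: https://github.com/example-org/example-repo.git   # placeholder
      targetRevision: '{{head_sha}}'      # deploy exactly the PR head commit
      path: manifests/preview             # assumed manifest location
      helm:
        parameters:
          - name: image.tag
            value: '{{head_short_sha}}'   # assumes images are tagged with the short SHA
    destination:
      server: https://kubernetes.default.svc
      namespace: 'preview-{{branch_slug}}'
```
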
## Webhook Configuration
@@ -318,3 +319,7 @@ The Pull Request Generator will requeue when the next action occurs.

- `merge`

For more information about each event, please refer to the [official documentation](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#merge-request-events).

## Lifecycle

An Application will be generated when a Pull Request is discovered and the configured criteria are met - i.e., for GitHub, when a Pull Request matches the specified `labels` and/or `pullRequestState`. The Application will be removed when a Pull Request no longer meets the specified criteria.