Mirror of https://github.com/argoproj/argo-cd.git (synced 2026-02-20 17:48:47 +01:00)

Compare commits: ibb-docs...security-s (334 commits)
.github/ISSUE_TEMPLATE/release.md (new file, 32 lines)
@@ -0,0 +1,32 @@
---
name: Argo CD Release
about: Used by our Release Champion to track progress of a minor release
title: 'Argo CD Release vX.X'
labels: 'release'
assignees: ''
---

Target RC1 date: ___. __, ____
Target GA date: ___. __, ____

- [ ] Create new section in the [Release Planning doc](https://docs.google.com/document/d/1trJIomcgXcfvLw0aYnERrFWfPjQOfYMDJOCh1S8nMBc/edit?usp=sharing)
- [ ] Schedule a Release Planning meeting roughly two weeks before the scheduled Release freeze date by adding it to the community calendar (or delegate this task to someone with write access to the community calendar)
- [ ] Include Zoom link in the invite
- [ ] Post in #argo-cd and #argo-contributors one week before the meeting
- [ ] Post again one hour before the meeting
- [ ] At the meeting, remove issues/PRs from the project's column for that release which have not been “claimed” by at least one Approver (add it to the next column if Approver requests that)
- [ ] 1wk before feature freeze post in #argo-contributors that PRs must be merged by DD-MM-YYYY to be included in the release - ask approvers to drop items from milestone they can’t merge
- [ ] At least two days before RC1 date, draft RC blog post and submit it for review (or delegate this task)
- [ ] Cut RC1 (or delegate this task to an Approver and coordinate timing)
- [ ] Create new release branch
- [ ] Add the release branch to ReadTheDocs
- [ ] Confirm that tweet and blog post are ready
- [ ] Trigger the release
- [ ] After the release is finished, publish tweet and blog post
- [ ] Post in #argo-cd and #argo-announcements with lots of emojis announcing the release and requesting help testing
- [ ] Monitor support channels for issues, cherry-picking bugfixes and docs fixes as appropriate (or delegate this task to an Approver and coordinate timing)
- [ ] At release date, evaluate if any bugs justify delaying the release. If not, cut the release (or delegate this task to an Approver and coordinate timing)
- [ ] If unreleased changes are on the release branch for {current minor version minus 3}, cut a final patch release for that series (or delegate this task to an Approver and coordinate timing)
- [ ] After the release, post in #argo-cd that the {current minor version minus 3} has reached EOL (example: https://cloud-native.slack.com/archives/C01TSERG0KZ/p1667336234059729)
- [ ] (For the next release champion) Review the [items scheduled for the next release](https://github.com/orgs/argoproj/projects/25). If any item does not have an assignee who can commit to finish the feature, move it to the next release.
- [ ] (For the next release champion) Schedule a time mid-way through the release cycle to review items again.

.github/dependabot.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"
ignore:
- dependency-name: k8s.io/*

- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

- package-ecosystem: "npm"
directory: "/ui/"
schedule:
interval: "daily"

.github/workflows/ci-build.yaml (68 lines changed)
@@ -27,9 +27,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Download all Go modules
@@ -45,13 +45,13 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -69,13 +69,13 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@0ad9a0988b3973e851ab0a07adf248ec2e100376 # v3.3.1
with:
version: v1.46.2
args: --timeout 10m --exclude SA5011 --verbose
@@ -92,11 +92,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -116,7 +116,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -133,12 +133,12 @@ jobs:
- name: Run all unit tests
run: make test-local
- name: Generate code coverage artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: code-coverage
path: coverage.out
- name: Generate test results artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: test-results
path: test-results/
@@ -155,11 +155,11 @@ jobs:
- name: Create checkout directory
run: mkdir -p ~/go/src/github.com/argoproj
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Create symlink in GOPATH
run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Install required packages
@@ -179,7 +179,7 @@ jobs:
run: |
echo "/usr/local/bin" >> $GITHUB_PATH
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -196,7 +196,7 @@ jobs:
- name: Run all unit tests
run: make test-race-local
- name: Generate test results artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: race-results
path: test-results/
@@ -206,9 +206,9 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: Create symlink in GOPATH
@@ -250,14 +250,14 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup NodeJS
uses: actions/setup-node@v1
uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0
with:
node-version: '12.18.4'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -287,12 +287,12 @@ jobs:
sonar_secret: ${{ secrets.SONAR_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
fetch-depth: 0
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v2-${{ hashFiles('**/yarn.lock') }}
@@ -303,16 +303,16 @@ jobs:
run: |
mkdir -p test-results
- name: Get code coverage artifiact
uses: actions/download-artifact@v2
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: code-coverage
- name: Get test result artifact
uses: actions/download-artifact@v2
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
with:
name: test-results
path: test-results
- name: Upload code coverage information to codecov.io
uses: codecov/codecov-action@v1
uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1
with:
file: coverage.out
- name: Perform static code analysis using SonarCloud
@@ -348,7 +348,7 @@ jobs:
runs-on: ubuntu-22.04
strategy:
matrix:
k3s-version: [v1.24.3, v1.23.3, v1.22.6]
k3s-version: [v1.26.0, v1.25.4, v1.24.3, v1.23.3]
needs:
- build-go
env:
@@ -366,9 +366,9 @@ jobs:
GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- name: GH actions workaround - Kill XSP4 process
@@ -386,7 +386,7 @@ jobs:
sudo chown runner $HOME/.kube/config
kubectl version
- name: Restore go build cache
uses: actions/cache@v1
uses: actions/cache@627f0f41f6904a5b1efbaed9f96d9eb58e92e920 # v3.2.4
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
@@ -412,9 +412,9 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.35.3-distroless
docker pull ghcr.io/dexidp/dex:v2.35.3
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.5-alpine
docker pull redis:7.0.7-alpine
- name: Create target directory for binaries in the build-process
run: |
mkdir -p dist
@@ -442,7 +442,7 @@ jobs:
set -x
make test-e2e-local
- name: Upload e2e-server logs
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
with:
name: e2e-server-k8s${{ matrix.k3s-version }}.log
path: /tmp/e2e-server.log

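Nearly every hunk in this workflow applies the same hardening pattern: the mutable tag of a third-party action (for example actions/checkout@v3) is replaced by the full commit SHA of a specific release, with the human-readable version kept as a trailing comment. A minimal sketch of a pinned step, shown in isolation — the step name is illustrative, while the SHA and version comment are copied verbatim from the hunks above:

- name: Checkout code
  # The SHA pins the exact commit the v3.3.0 tag pointed at, so a moved or
  # re-tagged upstream release can no longer silently change what this job runs.
  uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
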
.github/workflows/codeql.yml (8 lines changed)
@@ -29,11 +29,11 @@ jobs:

steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
@@ -41,7 +41,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33

# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -55,4 +55,4 @@ jobs:
# make release

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@8aff97f12c99086bdb92ff62ae06dbbcdf07941b # v2.1.33

.github/workflows/image.yaml (38 lines changed)
@@ -23,37 +23,38 @@ jobs:
publish:
permissions:
contents: write # for git to push upgrade commit if not already deployed
packages: write # for pushing packages to GHCR, which is used by cd.apps.argoproj.io to avoid polluting Quay with tags
if: github.repository == 'argoproj/argo-cd'
runs-on: ubuntu-22.04
env:
GOPATH: /home/runner/work/argo-cd/argo-cd
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}
- uses: actions/checkout@master
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
path: src/github.com/argoproj/argo-cd

# get image tag
- run: echo ::set-output name=tag::$(cat ./VERSION)-${GITHUB_SHA::8}
- run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
working-directory: ./src/github.com/argoproj/argo-cd
id: image

# login
- run: |
docker login ghcr.io --username $USERNAME --password $PASSWORD
docker login quay.io --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker login ghcr.io --username $USERNAME --password-stdin <<< "$PASSWORD"
docker login quay.io --username "$DOCKER_USERNAME" --password-stdin <<< "$DOCKER_TOKEN"
if: github.event_name == 'push'
env:
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.TOKEN }}
USERNAME: ${{ github.actor }}
PASSWORD: ${{ secrets.GITHUB_TOKEN }}
DOCKER_USERNAME: ${{ secrets.RELEASE_QUAY_USERNAME }}
DOCKER_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}

# build
- uses: docker/setup-qemu-action@v2
- uses: docker/setup-buildx-action@v2
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@15c905b16b06416d2086efa066dd8e3a35cc7f98 # v2.4.0
- run: |
IMAGE_PLATFORMS=linux/amd64
if [[ "${{ github.event_name }}" == "push" || "${{ contains(github.event.pull_request.labels.*.name, 'test-arm-image') }}" == "true" ]]
@@ -61,20 +62,27 @@ jobs:
IMAGE_PLATFORMS=linux/amd64,linux/arm64,linux/s390x,linux/ppc64le
fi
echo "Building image for platforms: $IMAGE_PLATFORMS"
docker buildx build --platform $IMAGE_PLATFORMS --push="${{ github.event_name == 'push' }}" \
-t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} \
docker buildx build --platform $IMAGE_PLATFORMS --sbom=false --provenance=false --push="${{ github.event_name == 'push' }}" \
-t ghcr.io/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }} \
-t quay.io/argoproj/argocd:latest .
working-directory: ./src/github.com/argoproj/argo-cd

# sign container images
- name: Install cosign
uses: sigstore/cosign-installer@main
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.0'
cosign-release: 'v1.13.1'

- name: Install crane to get digest of image
uses: imjasonh/setup-crane@e82f1b9a8007d399333baba4d75915558e9fb6a4

- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:latest)" >> $GITHUB_ENV

- name: Sign Argo CD latest image
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd:latest
cosign sign --key env://COSIGN_PRIVATE_KEY quay.io/argoproj/argocd@${{ env.IMAGE_DIGEST }}
# Displays the public key to share.
cosign public-key --key env://COSIGN_PRIVATE_KEY
env:
@@ -88,7 +96,7 @@ jobs:
env:
TOKEN: ${{ secrets.TOKEN }}
- run: |
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argocd:${{ steps.image.outputs.tag }}
docker run -u $(id -u):$(id -g) -v $(pwd):/src -w /src --rm -t ghcr.io/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }} kustomize edit set image quay.io/argoproj/argocd=ghcr.io/argoproj/argo-cd/argocd:${{ steps.image.outputs.tag }}
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
git diff --exit-code && echo 'Already deployed' || (git commit -am 'Upgrade argocd to ${{ steps.image.outputs.tag }}' && git push)

.github/workflows/pr-title-check.yml (new file, 41 lines)
@@ -0,0 +1,41 @@
name: "Lint PR"

on:
pull_request_target:
types:
- opened
- edited
- synchronize

# IMPORTANT: No checkout actions, scripts, or builds should be added to this workflow. Permissions should always be used
# with extreme caution.
permissions:
contents: read

# PR updates can happen in quick succession leading to this
# workflow being trigger a number of times. This limits it
# to one run per PR.
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}

jobs:
main:
permissions:
pull-requests: read # for amannn/action-semantic-pull-request to analyze PRs
statuses: write # for amannn/action-semantic-pull-request to mark status of analyzed PR
name: Validate PR title
runs-on: ubuntu-latest
steps:
# IMPORTANT: Carefully review changes when updating this action. Using the pull_request_target event requires caution.
- uses: amannn/action-semantic-pull-request@01d5fd8a8ebb9aafe902c40c53f0f4744f7381eb # v5.0.2
with:
types: |
feat
fix
docs
test
ci
chore
[Bot] docs
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/release.yaml (40 lines changed)
@@ -12,7 +12,7 @@ on:
- "!release-v0*"

env:
GOLANG_VERSION: '1.18'
GOLANG_VERSION: '1.18'

permissions:
contents: read
@@ -43,7 +43,7 @@ jobs:
GIT_EMAIL: argoproj@gmail.com
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
@@ -147,7 +147,7 @@ jobs:
echo "RELEASE_NOTES=${RELEASE_NOTES}" >> $GITHUB_ENV

- name: Setup Golang
uses: actions/setup-go@v3
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
with:
go-version: ${{ env.GOLANG_VERSION }}

@@ -195,19 +195,19 @@ jobs:
QUAY_TOKEN: ${{ secrets.RELEASE_QUAY_TOKEN }}
run: |
set -ue
docker login quay.io --username "${QUAY_USERNAME}" --password "${QUAY_TOKEN}"
docker login quay.io --username "${QUAY_USERNAME}" --password-stdin <<< "${QUAY_TOKEN}"
# Remove the following when Docker Hub is gone
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_TOKEN}"
docker login --username "${DOCKER_USERNAME}" --password-stdin <<< "${DOCKER_TOKEN}"
if: ${{ env.DRY_RUN != 'true' }}

- uses: docker/setup-qemu-action@v2
- uses: docker/setup-buildx-action@v2
- uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0
- uses: docker/setup-buildx-action@15c905b16b06416d2086efa066dd8e3a35cc7f98 # v2.4.0
- name: Build and push Docker image for release
run: |
set -ue
git clean -fd
mkdir -p dist/
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le --sbom=false --provenance=false --push -t ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION} -t argoproj/argocd:v${TARGET_VERSION} .
make release-cli
make checksums
chmod +x ./dist/argocd-linux-amd64
@@ -215,13 +215,20 @@ jobs:
if: ${{ env.DRY_RUN != 'true' }}

- name: Install cosign
uses: sigstore/cosign-installer@main
uses: sigstore/cosign-installer@9becc617647dfa20ae7b1151972e9b3a2c338a2b # v2.8.1
with:
cosign-release: 'v1.13.0'
cosign-release: 'v1.13.1'

- name: Install crane to get digest of image
uses: imjasonh/setup-crane@e82f1b9a8007d399333baba4d75915558e9fb6a4

- name: Get digest of image
run: |
echo "IMAGE_DIGEST=$(crane digest quay.io/argoproj/argocd:v${TARGET_VERSION})" >> $GITHUB_ENV

- name: Sign Argo CD container images and assets
run: |
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd:v${TARGET_VERSION}
cosign sign --key env://COSIGN_PRIVATE_KEY ${IMAGE_NAMESPACE}/argocd@${{ env.IMAGE_DIGEST }}
cosign sign-blob --key env://COSIGN_PRIVATE_KEY ./dist/argocd-${TARGET_VERSION}-checksums.txt > ./dist/argocd-${TARGET_VERSION}-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argocd-cosign.pub
@@ -232,7 +239,7 @@ jobs:

- name: Read release notes file
id: release-notes
uses: juliangruber/read-file-action@v1
uses: juliangruber/read-file-action@02bbba9876a8f870efd4ad64e3b9088d3fb94d4b # v1.1.6
with:
path: ${{ env.RELEASE_NOTES }}

@@ -243,7 +250,7 @@ jobs:
git push origin ${RELEASE_TAG}

- name: Dry run GitHub release
uses: actions/create-release@v1
uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
id: create_release
@@ -264,7 +271,7 @@ jobs:
SIGS_BOM_VERSION: v0.2.1
# comma delimited list of project relative folders to inspect for package
# managers (gomod, yarn, npm).
PROJECT_FOLDERS: ".,./ui"
PROJECT_FOLDERS: ".,./ui"
# full qualified name of the docker image to be inspected
DOCKER_IMAGE: ${{env.IMAGE_NAMESPACE}}/argocd:v${{env.TARGET_VERSION}}
run: |
@@ -295,7 +302,7 @@ jobs:
if: ${{ env.DRY_RUN != 'true' }}

- name: Create GitHub release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v0.1.15
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -303,7 +310,6 @@ jobs:
tag_name: ${{ env.RELEASE_TAG }}
draft: ${{ env.DRAFT_RELEASE }}
prerelease: ${{ env.PRE_RELEASE }}
generate_release_notes: true
body: ${{ steps.release-notes.outputs.content }} # Pre-pended to the generated notes
files: |
dist/argocd-*
@@ -314,7 +320,7 @@ jobs:
- name: Update homebrew formula
env:
HOMEBREW_TOKEN: ${{ secrets.RELEASE_HOMEBREW_TOKEN }}
uses: dawidd6/action-homebrew-bump-formula@v3
uses: dawidd6/action-homebrew-bump-formula@02e79d9da43d79efa846d73695b6052cbbdbf48a # v3.8.3
with:
token: ${{env.HOMEBREW_TOKEN}}
formula: argocd

.github/workflows/update-snyk.yaml (4 lines changed)
@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Build reports
@@ -31,6 +31,6 @@ jobs:
git config --global user.email 'ci@argoproj.com'
git config --global user.name 'CI'
git add docs/snyk
git commit -m "[Bot] Update Snyk reports" --signoff
git commit -m "[Bot] docs: Update Snyk reports" --signoff
git push --set-upstream origin "$pr_branch"
gh pr create -B master -H "$pr_branch" --title '[Bot] docs: Update Snyk report' --body ''

Dockerfile
@@ -36,6 +36,8 @@ RUN ./install.sh helm-linux && \
####################################################################################################
FROM $BASE_IMAGE AS argocd-base

LABEL org.opencontainers.image.source="https://github.com/argoproj/argo-cd"

USER root

ENV ARGOCD_USER_ID=999

Makefile (29 lines changed)
@@ -64,13 +64,20 @@ else
DOCKER_SRC_MOUNT="$(PWD):/go/src/github.com/argoproj/argo-cd$(VOLUME_MOUNT)"
endif

# User and group IDs to map to the test container
CONTAINER_UID=$(shell id -u)
CONTAINER_GID=$(shell id -g)

# Set SUDO to sudo to run privileged commands with sudo
SUDO?=

# Runs any command in the argocd-test-utils container in server mode
# Server mode container will start with uid 0 and drop privileges during runtime
define run-in-test-server
docker run --rm -it \
$(SUDO) docker run --rm -it \
--name argocd-test-server \
-u $(shell id -u):$(shell id -g) \
-e USER_ID=$(shell id -u) \
-u $(CONTAINER_UID):$(CONTAINER_GID) \
-e USER_ID=$(CONTAINER_UID) \
-e HOME=/home/user \
-e GOPATH=/go \
-e GOCACHE=/tmp/go-build-cache \
@@ -98,9 +105,9 @@ endef

# Runs any command in the argocd-test-utils container in client mode
define run-in-test-client
docker run --rm -it \
$(SUDO) docker run --rm -it \
--name argocd-test-client \
-u $(shell id -u):$(shell id -g) \
-u $(CONTAINER_UID):$(CONTAINER_GID) \
-e HOME=/home/user \
-e GOPATH=/go \
-e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) \
@@ -119,7 +126,7 @@ endef

#
define exec-in-test-server
docker exec -it -u $(shell id -u):$(shell id -g) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
$(SUDO) docker exec -it -u $(CONTAINER_UID):$(CONTAINER_GID) -e ARGOCD_E2E_RECORD=$(ARGOCD_E2E_RECORD) -e ARGOCD_E2E_K3S=$(ARGOCD_E2E_K3S) argocd-test-server $(1)
endef

PATH:=$(PATH):$(PWD)/hack
@@ -244,8 +251,8 @@ release-cli: clean-debug build-ui
.PHONY: test-tools-image
test-tools-image:
ifndef SKIP_TEST_TOOLS_IMAGE
docker build --build-arg UID=$(shell id -u) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
$(SUDO) docker build --build-arg UID=$(CONTAINER_UID) -t $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) -f test/container/Dockerfile .
$(SUDO) docker tag $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE) $(TEST_TOOLS_PREFIX)$(TEST_TOOLS_IMAGE):$(TEST_TOOLS_TAG)
endif

.PHONY: manifests-local
@@ -326,7 +333,7 @@ mod-vendor: test-tools-image
mod-vendor-local: mod-download-local
go mod vendor

# Deprecated - replace by install-local-tools
# Deprecated - replace by install-tools-local
.PHONY: install-lint-tools
install-lint-tools:
./hack/install.sh lint-tools
@@ -512,7 +519,7 @@ build-docs-local:

.PHONY: build-docs
build-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} build
docker run ${MKDOCS_RUN_ARGS} --rm -it -v ${CURRENT_DIR}:/docs --entrypoint "" ${MKDOCS_DOCKER_IMAGE} sh -c 'pip install -r docs/requirements.txt; mkdocs build'

.PHONY: serve-docs-local
serve-docs-local:
@@ -520,7 +527,7 @@ serve-docs-local:

.PHONY: serve-docs
serve-docs:
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}:/docs ${MKDOCS_DOCKER_IMAGE} serve -a 0.0.0.0:8000
docker run ${MKDOCS_RUN_ARGS} --rm -it -p 8000:8000 -v ${CURRENT_DIR}/site:/site -w /site --entrypoint "" ${MKDOCS_DOCKER_IMAGE} python3 -m http.server --bind 0.0.0.0 8000


# Verify that kubectl can connect to your K8s cluster from Docker

OWNERS (2 lines changed)
@@ -27,3 +27,5 @@ reviewers:
- wanghong230
- ciiay
- saumeya
- zachaller
- 34fathombelow

README.md (13 lines changed)
@@ -1,6 +1,17 @@
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22) [](https://argoproj.github.io/community/join-slack) [](https://codecov.io/gh/argoproj/argo-cd) [](https://github.com/argoproj/argo-cd/releases/latest) [](https://bestpractices.coreinfrastructure.org/projects/4486) [](https://twitter.com/argoproj)
**Releases:**
[](https://github.com/argoproj/argo-cd/releases/latest)
[](https://artifacthub.io/packages/helm/argo/argo-cd)

**Code:**
[](https://github.com/argoproj/argo-cd/actions?query=workflow%3A%22Integration+tests%22)
[](https://codecov.io/gh/argoproj/argo-cd)
[](https://bestpractices.coreinfrastructure.org/projects/4486)
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-cd?ref=badge_shield)

**Social:**
[](https://twitter.com/argoproj)
[](https://argoproj.github.io/community/join-slack)

# Argo CD - Declarative Continuous Delivery for Kubernetes

## What is Argo CD?

@@ -1,7 +1,7 @@
# Defined below are the security contacts for this repo.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://argo-cd.readthedocs.io/en/latest/security_considerations/#reporting-vulnerabilities
# INSTRUCTIONS AT https://github.com/argoproj/argo-cd/security/policy

alexmt
edlee2121

USERS.md (28 lines changed)
@@ -11,9 +11,11 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Adevinta](https://www.adevinta.com/)
1. [Adfinis](https://adfinis.com)
1. [Adventure](https://jp.adventurekk.com/)
1. [AirQo](https://airqo.net/)
1. [Akuity](https://akuity.io/)
1. [Alibaba Group](https://www.alibabagroup.com/)
1. [Allianz Direct](https://www.allianzdirect.de/)
1. [Amadeus IT Group](https://amadeus.com/)
1. [Ambassador Labs](https://www.getambassador.io/)
1. [ANSTO - Australian Synchrotron](https://www.synchrotron.org.au/)
1. [Ant Group](https://www.antgroup.com/)
@@ -29,18 +31,21 @@ Currently, the following organizations are **officially** using Argo CD:
1. [BigPanda](https://bigpanda.io)
1. [BioBox Analytics](https://biobox.io)
1. [BMW Group](https://www.bmwgroup.com/)
1. [PT Boer Technology (Btech)](https://btech.id/)
1. [Boozt](https://www.booztgroup.com/)
1. [Boticario](https://www.boticario.com.br/)
1. [Bulder Bank](https://bulderbank.no)
1. [Camptocamp](https://camptocamp.com)
1. [Capital One](https://www.capitalone.com)
1. [CARFAX](https://www.carfax.com)
1. [CARFAX Europe](https://www.carfax.eu)
1. [Casavo](https://casavo.com)
1. [Celonis](https://www.celonis.com/)
1. [CERN](https://home.cern/)
1. [Chargetrip](https://chargetrip.com)
1. [Chime](https://www.chime.com)
1. [Cisco ET&I](https://eti.cisco.com/)
1. [Cloud Scale](https://cloudscaleinc.com/)
1. [Cobalt](https://www.cobalt.io/)
1. [Codefresh](https://www.codefresh.io/)
1. [Codility](https://www.codility.com/)
@@ -56,6 +61,8 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Deutsche Telekom AG](https://telekom.com)
1. [Devopsi - Poland Software/DevOps Consulting](https://devopsi.pl/)
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [Divistant](https://divistant.com)
1. [Doximity](https://www.doximity.com/)
1. [EDF Renewables](https://www.edf-re.com/)
1. [edX](https://edx.org)
1. [Elastic](https://elastic.co/)
@@ -65,6 +72,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [END.](https://www.endclothing.com/)
1. [Energisme](https://energisme.com/)
1. [enigmo](https://enigmo.co.jp/)
1. [Envoy](https://envoy.com/)
1. [Faro](https://www.faro.com/)
1. [Fave](https://myfave.com)
1. [Flip](https://flip.id)
@@ -75,7 +83,8 @@ Currently, the following organizations are **officially** using Argo CD:
1. [G DATA CyberDefense AG](https://www.gdata-software.com/)
1. [Garner](https://www.garnercorp.com)
1. [Generali Deutschland AG](https://www.generali.de/)
2. [Gepardec](https://gepardec.com/)
1. [Gepardec](https://gepardec.com/)
1. [GetYourGuide](https://www.getyourguide.com/)
1. [Gitpod](https://www.gitpod.io)
1. [Gllue](https://gllue.com)
1. [gloat](https://gloat.com/)
@@ -97,6 +106,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Ibotta](https://home.ibotta.com)
1. [IITS-Consulting](https://iits-consulting.de)
1. [imaware](https://imaware.health)
1. [Indeed](https://indeed.com)
1. [Index Exchange](https://www.indexexchange.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Intuit](https://www.intuit.com/)
@@ -116,6 +126,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Kurly](https://www.kurly.com/)
1. [LexisNexis](https://www.lexisnexis.com/)
1. [Lian Chu Securities](https://lczq.com)
1. [Liatrio](https://www.liatrio.com)
1. [Lightricks](https://www.lightricks.com/)
1. [LINE](https://linecorp.com/en/)
1. [Lytt](https://www.lytt.co/)
@@ -128,6 +139,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Max Kelsen](https://www.maxkelsen.com/)
1. [MeDirect](https://medirect.com.mt/)
1. [Meican](https://meican.com/)
1. [Mercedes-Benz Tech Innovation](https://www.mercedes-benz-techinnovation.com/)
1. [Metanet](http://www.metanet.co.kr/en/)
1. [MindSpore](https://mindspore.cn)
1. [Mirantis](https://mirantis.com/)
@@ -142,14 +154,17 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Nextdoor](https://nextdoor.com/)
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
1. [Nitro](https://gonitro.com)
1. [Objective](https://www.objective.com.br/)
1. [OCCMundial](https://occ.com.mx)
1. [Octadesk](https://octadesk.com)
1. [omegaUp](https://omegaUp.com)
1. [Omni](https://omni.se/)
1. [openEuler](https://openeuler.org)
1. [openGauss](https://opengauss.org/)
1. [openLooKeng](https://openlookeng.io)
1. [OpenSaaS Studio](https://opensaas.studio)
1. [Opensurvey](https://www.opensurvey.co.kr/)
1. [OpsMx](https://opsmx.io)
1. [OpsVerse](https://opsverse.io)
1. [Optoro](https://www.optoro.com/)
1. [Orbital Insight](https://orbitalinsight.com/)
@@ -157,11 +172,15 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Packlink](https://www.packlink.com/)
1. [Pandosearch](https://www.pandosearch.com/en/home)
1. [PagerDuty](https://www.pagerduty.com/)
1. [Patreon](https://www.patreon.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [Pigment](https://www.gopigment.com/)
1. [Pipefy](https://www.pipefy.com/)
1. [Pismo](https://pismo.io/)
1. [Platform9 Systems](https://platform9.com/)
1. [Polarpoint.io](https://polarpoint.io)
1. [PostFinance](https://github.com/postfinance)
1. [Preferred Networks](https://preferred.jp/en/)
1. [Productboard](https://www.productboard.com/)
1. [Prudential](https://prudential.com.sg)
@@ -181,10 +200,13 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Saildrone](https://www.saildrone.com/)
1. [Saloodo! GmbH](https://www.saloodo.com)
1. [Sap Labs](http://sap.com)
1. [Sauce Labs](https://saucelabs.com/)
1. [Schwarz IT](https://jobs.schwarz/it-mission)
1. [SI Analytics](https://si-analytics.ai)
1. [Skit](https://skit.ai/)
1. [Skyscanner](https://www.skyscanner.net/)
1. [Smilee.io](https://smilee.io)
1. [Smood.ch](https://www.smood.ch/)
1. [Snapp](https://snapp.ir/)
1. [Snyk](https://snyk.io/)
1. [Softway Medical](https://www.softwaymedical.fr/)
@@ -213,6 +235,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Toss](https://toss.im/en)
1. [Trendyol](https://www.trendyol.com/)
1. [tru.ID](https://tru.id)
1. [Trusting Social](https://trustingsocial.com/)
1. [Twilio SendGrid](https://sendgrid.com)
1. [tZERO](https://www.tzero.com/)
1. [UBIO](https://ub.io/)
@@ -221,9 +244,11 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Unifonic Inc](https://www.unifonic.com/)
1. [Universidad Mesoamericana](https://www.umes.edu.gt/)
1. [Viaduct](https://www.viaduct.ai/)
1. [Vinted](https://vinted.com/)
1. [Virtuo](https://www.govirtuo.com/)
1. [VISITS Technologies](https://visits.world/en)
1. [Volvo Cars](https://www.volvocars.com/)
1. [Voyager Digital](https://www.investvoyager.com/)
1. [VSHN - The DevOps Company](https://vshn.ch/)
1. [Walkbase](https://www.walkbase.com/)
1. [Webstores](https://www.webstores.nl)
@@ -232,6 +257,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [WeMo Scooter](https://www.wemoscooter.com/)
1. [Whitehat Berlin](https://whitehat.berlin) by Guido Maria Serra +Fenaroli
1. [Witick](https://witick.io/)
1. [Wolffun Game](https://www.wolffungame.com/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [WSpot](https://www.wspot.com.br/)

@@ -25,6 +25,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -69,6 +70,8 @@ type ApplicationSetReconciler struct {
|
||||
KubeClientset kubernetes.Interface
|
||||
utils.Policy
|
||||
utils.Renderer
|
||||
|
||||
EnableProgressiveSyncs bool
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=argoproj.io,resources=applicationsets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -134,6 +137,27 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
return ctrl.Result{RequeueAfter: ReconcileRequeueOnValidationError}, nil
|
||||
}
|
||||
|
||||
// appMap is a name->app collection of Applications in this ApplicationSet.
|
||||
appMap := map[string]argov1alpha1.Application{}
|
||||
// appSyncMap tracks which apps will be synced during this reconciliation.
|
||||
appSyncMap := map[string]bool{}
|
||||
|
||||
if r.EnableProgressiveSyncs && applicationSetInfo.Spec.Strategy != nil {
|
||||
applications, err := r.getCurrentApplications(ctx, applicationSetInfo)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to get current applications for application set: %w", err)
|
||||
}
|
||||
|
||||
for _, app := range applications {
|
||||
appMap[app.Name] = app
|
||||
}
|
||||
|
||||
appSyncMap, err = r.performProgressiveSyncs(ctx, applicationSetInfo, applications, desiredApplications, appMap)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to perform progressive sync reconciliation for application set: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var validApps []argov1alpha1.Application
|
||||
for i := range desiredApplications {
|
||||
if validateErrors[i] == nil {
|
||||
@@ -162,6 +186,26 @@ func (r *ApplicationSetReconciler) Reconcile(ctx context.Context, req ctrl.Reque
|
||||
)
|
||||
}
|
||||
|
||||
if r.EnableProgressiveSyncs {
|
||||
// trigger appropriate application syncs if RollingSync strategy is enabled
|
||||
if progressiveSyncsStrategyEnabled(&applicationSetInfo, "RollingSync") {
|
||||
validApps, err = r.syncValidApplications(ctx, &applicationSetInfo, appSyncMap, appMap, validApps)
|
||||
|
||||
if err != nil {
|
||||
_ = r.setApplicationSetStatusCondition(ctx,
|
||||
&applicationSetInfo,
|
||||
argov1alpha1.ApplicationSetCondition{
|
||||
Type: argov1alpha1.ApplicationSetConditionErrorOccurred,
|
||||
Message: err.Error(),
|
||||
Reason: argov1alpha1.ApplicationSetReasonSyncApplicationError,
|
||||
Status: argov1alpha1.ApplicationSetConditionStatusTrue,
|
||||
}, parametersGenerated,
|
||||
)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r.Policy.Update() {
|
||||
err = r.createOrUpdateInCluster(ctx, applicationSetInfo, validApps)
|
||||
if err != nil {
|
||||
@@ -528,6 +572,11 @@ func (r *ApplicationSetReconciler) createOrUpdateInCluster(ctx context.Context,
|
||||
// Copy only the Application/ObjectMeta fields that are significant, from the generatedApp
|
||||
found.Spec = generatedApp.Spec
|
||||
|
||||
// allow setting the Operation field to trigger a sync operation on an Application
|
||||
if generatedApp.Operation != nil {
|
||||
found.Operation = generatedApp.Operation
|
||||
}
|
||||
|
||||
// Preserve specially treated argo cd annotations:
|
||||
// * https://github.com/argoproj/applicationset/issues/180
|
||||
// * https://github.com/argoproj/argo-cd/issues/10500
|
||||
@@ -726,4 +775,541 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationSetReconciler) performProgressiveSyncs(ctx context.Context, appset argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, desiredApplications []argov1alpha1.Application, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
|
||||
|
||||
appDependencyList, appStepMap, err := r.buildAppDependencyList(ctx, appset, desiredApplications)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build app dependency list: %w", err)
|
||||
}
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatus(ctx, &appset, applications, appStepMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset app status: %w", err)
|
||||
}
|
||||
|
||||
log.Infof("ApplicationSet %v step list:", appset.Name)
|
||||
for i, step := range appDependencyList {
|
||||
log.Infof("step %v: %+v", i+1, step)
|
||||
}
|
||||
|
||||
appSyncMap, err := r.buildAppSyncMap(ctx, appset, appDependencyList, appMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build app sync map: %w", err)
|
||||
}
|
||||
|
||||
log.Infof("Application allowed to sync before maxUpdate?: %+v", appSyncMap)
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusProgress(ctx, &appset, appSyncMap, appStepMap, appMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update applicationset application status progress: %w", err)
|
||||
}
|
||||
|
||||
_, err = r.updateApplicationSetApplicationStatusConditions(ctx, &appset)
|
||||
if err != nil {
|
||||
        return nil, fmt.Errorf("failed to update applicationset application status conditions: %w", err)
    }

    return appSyncMap, nil
}

// this list tracks which Applications belong to each RollingUpdate step
func (r *ApplicationSetReconciler) buildAppDependencyList(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, applications []argov1alpha1.Application) ([][]string, map[string]int, error) {

    if applicationSet.Spec.Strategy == nil || applicationSet.Spec.Strategy.Type == "" || applicationSet.Spec.Strategy.Type == "AllAtOnce" {
        return [][]string{}, map[string]int{}, nil
    }

    steps := []argov1alpha1.ApplicationSetRolloutStep{}
    if progressiveSyncsStrategyEnabled(&applicationSet, "RollingSync") {
        steps = applicationSet.Spec.Strategy.RollingSync.Steps
    }

    appDependencyList := make([][]string, 0)
    for range steps {
        appDependencyList = append(appDependencyList, make([]string, 0))
    }

    appStepMap := map[string]int{}

    // use applicationLabelSelectors to filter generated Applications into steps and status by name
    for _, app := range applications {
        for i, step := range steps {

            selected := true        // default to true, assuming the current Application is a match for the given step matchExpression
            allNotInMatched := true // needed to support correct AND behavior between multiple NotIn MatchExpressions
            notInUsed := false      // since we default to allNotInMatched == true, track whether a NotIn expression was actually used

            for _, matchExpression := range step.MatchExpressions {

                if matchExpression.Operator == "In" {
                    if val, ok := app.Labels[matchExpression.Key]; ok {
                        valueMatched := labelMatchedExpression(val, matchExpression)

                        if !valueMatched { // none of the matchExpression values was a match with the Application's labels
                            selected = false
                            break
                        }
                    } else {
                        selected = false // no matching label key with In means this Application will not be included in the current step
                        break
                    }
                } else if matchExpression.Operator == "NotIn" {
                    notInUsed = true // a NotIn selector was used in this matchExpression
                    if val, ok := app.Labels[matchExpression.Key]; ok {
                        valueMatched := labelMatchedExpression(val, matchExpression)

                        if !valueMatched { // none of the matchExpression values was a match with the Application's labels
                            allNotInMatched = false
                        }
                    } else {
                        allNotInMatched = false // no matching label key with NotIn means this Application may still be included in the current step
                    }
                } else { // handle invalid operator selection
                    log.Warnf("skipping AppSet rollingUpdate step Application selection for %q, invalid matchExpression operator provided: %q", applicationSet.Name, matchExpression.Operator)
                    selected = false
                    break
                }
            }

            if notInUsed && allNotInMatched { // check if all NotIn Expressions matched, if so exclude this Application
                selected = false
            }

            if selected {
                appDependencyList[i] = append(appDependencyList[i], app.Name)
                if val, ok := appStepMap[app.Name]; ok {
                    log.Warnf("AppSet '%v' has an invalid matchExpression that selects Application '%v' label twice, in steps %v and %v", applicationSet.Name, app.Name, val+1, i+1)
                } else {
                    appStepMap[app.Name] = i
                }
            }
        }
    }

    return appDependencyList, appStepMap, nil
}
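To make the grouping above concrete, here is a minimal sketch (hypothetical step definitions and label values, not taken from the repository) of the input a RollingSync strategy supplies and the per-step output the function builds:

    // Two hypothetical RollingSync steps: step 1 selects env=dev, step 2 selects env=prod.
    steps := []argov1alpha1.ApplicationSetRolloutStep{
        {MatchExpressions: []argov1alpha1.ApplicationMatchExpression{
            {Key: "env", Operator: "In", Values: []string{"dev"}},
        }},
        {MatchExpressions: []argov1alpha1.ApplicationMatchExpression{
            {Key: "env", Operator: "In", Values: []string{"prod"}},
        }},
    }

    // For generated Applications labeled env=dev ("app-dev") and env=prod ("app-prod"),
    // buildAppDependencyList would return:
    //   appDependencyList = [][]string{{"app-dev"}, {"app-prod"}}
    //   appStepMap        = map[string]int{"app-dev": 0, "app-prod": 1}
    _ = steps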
func labelMatchedExpression(val string, matchExpression argov1alpha1.ApplicationMatchExpression) bool {
    valueMatched := false
    for _, value := range matchExpression.Values {
        if val == value {
            valueMatched = true
            break
        }
    }
    return valueMatched
}

// this map is used to determine which stage of Applications are ready to be updated in the reconciler loop
func (r *ApplicationSetReconciler) buildAppSyncMap(ctx context.Context, applicationSet argov1alpha1.ApplicationSet, appDependencyList [][]string, appMap map[string]argov1alpha1.Application) (map[string]bool, error) {
    appSyncMap := map[string]bool{}
    syncEnabled := true

    // healthy stages and the first non-healthy stage should have sync enabled
    // every stage after should have sync disabled

    for i := range appDependencyList {
        // set the syncEnabled boolean for every Application in the current step
        for _, appName := range appDependencyList[i] {
            appSyncMap[appName] = syncEnabled
        }

        // detect if we need to halt before progressing to the next step
        for _, appName := range appDependencyList[i] {

            idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appName)
            if idx == -1 {
                // no Application status found, likely because the Application is being newly created
                syncEnabled = false
                break
            }

            appStatus := applicationSet.Status.ApplicationStatus[idx]

            if app, ok := appMap[appName]; ok {

                syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus)
                if !syncEnabled {
                    break
                }
            } else {
                // application name not found in the list of applications managed by this ApplicationSet, maybe because it's being deleted
                syncEnabled = false
                break
            }
        }
    }

    return appSyncMap, nil
}
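A rough illustration of the gating rule described in the comments above, assuming a hypothetical three-step rollout where step 1 ("a", "b") is Healthy and step 2 ("c") is the first non-healthy step:

    appSyncMap := map[string]bool{
        "a": true,  // step 1: Healthy, sync stays enabled
        "b": true,  // step 1: Healthy, sync stays enabled
        "c": true,  // step 2: first non-healthy step, still allowed to sync
        "d": false, // step 3: every step after the first non-healthy one is held back
    }
    _ = appSyncMap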
func appSyncEnabledForNextStep(appset *argov1alpha1.ApplicationSet, app argov1alpha1.Application, appStatus argov1alpha1.ApplicationSetApplicationStatus) bool {

    if progressiveSyncsStrategyEnabled(appset, "RollingSync") {
        // we still need to complete the current step if the Application is not yet Healthy or there are still pending Application changes
        return isApplicationHealthy(app) && appStatus.Status == "Healthy"
    }

    return true
}

func progressiveSyncsStrategyEnabled(appset *argov1alpha1.ApplicationSet, strategyType string) bool {
    if appset.Spec.Strategy == nil || appset.Spec.Strategy.Type != strategyType {
        return false
    }

    if strategyType == "RollingSync" && appset.Spec.Strategy.RollingSync == nil {
        return false
    }

    return true
}

func isApplicationHealthy(app argov1alpha1.Application) bool {
    healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)

    if healthStatusString == "Healthy" && syncStatusString != "OutOfSync" && (operationPhaseString == "Succeeded" || operationPhaseString == "") {
        return true
    }
    return false
}

func statusStrings(app argov1alpha1.Application) (string, string, string) {
    healthStatusString := string(app.Status.Health.Status)
    syncStatusString := string(app.Status.Sync.Status)
    operationPhaseString := ""
    if app.Status.OperationState != nil {
        operationPhaseString = string(app.Status.OperationState.Phase)
    }

    return healthStatusString, syncStatusString, operationPhaseString
}

// check each Application's status and promote it to the next status if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applications []argov1alpha1.Application, appStepMap map[string]int) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {

    now := metav1.Now()
    appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applications))

    for _, app := range applications {

        healthStatusString, syncStatusString, operationPhaseString := statusStrings(app)

        idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, app.Name)

        currentAppStatus := argov1alpha1.ApplicationSetApplicationStatus{}

        if idx == -1 {
            // AppStatus not found, set default status of "Waiting"
            currentAppStatus = argov1alpha1.ApplicationSetApplicationStatus{
                Application:        app.Name,
                LastTransitionTime: &now,
                Message:            "No Application status found, defaulting status to Waiting.",
                Status:             "Waiting",
                Step:               fmt.Sprint(appStepMap[app.Name] + 1),
            }
        } else {
            // we have an existing AppStatus
            currentAppStatus = applicationSet.Status.ApplicationStatus[idx]
        }

        appOutdated := false
        if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
            appOutdated = syncStatusString == "OutOfSync"
        }

        if appOutdated && currentAppStatus.Status != "Waiting" && currentAppStatus.Status != "Pending" {
            log.Infof("Application %v is outdated, updating its ApplicationSet status to Waiting", app.Name)
            currentAppStatus.LastTransitionTime = &now
            currentAppStatus.Status = "Waiting"
            currentAppStatus.Message = "Application has pending changes, setting status to Waiting."
            currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
        }

        if currentAppStatus.Status == "Pending" {
            if operationPhaseString == "Succeeded" && app.Status.OperationState.StartedAt.After(currentAppStatus.LastTransitionTime.Time) {
                log.Infof("Application %v has completed a sync successfully, updating its ApplicationSet status to Progressing", app.Name)
                currentAppStatus.LastTransitionTime = &now
                currentAppStatus.Status = "Progressing"
                currentAppStatus.Message = "Application resource completed a sync successfully, updating status from Pending to Progressing."
                currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
            } else if operationPhaseString == "Running" || healthStatusString == "Progressing" {
                log.Infof("Application %v has entered Progressing status, updating its ApplicationSet status to Progressing", app.Name)
                currentAppStatus.LastTransitionTime = &now
                currentAppStatus.Status = "Progressing"
                currentAppStatus.Message = "Application resource became Progressing, updating status from Pending to Progressing."
                currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
            }
        }

        if currentAppStatus.Status == "Waiting" && isApplicationHealthy(app) {
            log.Infof("Application %v is already synced and healthy, updating its ApplicationSet status to Healthy", app.Name)
            currentAppStatus.LastTransitionTime = &now
            currentAppStatus.Status = healthStatusString
            currentAppStatus.Message = "Application resource is already Healthy, updating status from Waiting to Healthy."
            currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
        }

        if currentAppStatus.Status == "Progressing" && isApplicationHealthy(app) {
            log.Infof("Application %v has completed Progressing status, updating its ApplicationSet status to Healthy", app.Name)
            currentAppStatus.LastTransitionTime = &now
            currentAppStatus.Status = healthStatusString
            currentAppStatus.Message = "Application resource became Healthy, updating status from Progressing to Healthy."
            currentAppStatus.Step = fmt.Sprint(appStepMap[currentAppStatus.Application] + 1)
        }

        appStatuses = append(appStatuses, currentAppStatus)
    }

    err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
    if err != nil {
        return nil, fmt.Errorf("failed to set AppSet application statuses: %w", err)
    }

    return appStatuses, nil
}
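Taken together, the checks above act as a small per-Application state machine; a sketch of the transitions they implement under the RollingSync strategy (status names as used in the code above):

    transitions := map[string]string{
        "OutOfSync while not Waiting/Pending": "Waiting",     // pending changes detected, app re-queued
        "Waiting and already Healthy":         "Healthy",     // nothing to sync for this step
        "Pending and sync Succeeded":          "Progressing", // sync finished after the last transition
        "Pending and operation Running":       "Progressing", // sync has started
        "Progressing and app Healthy":         "Healthy",     // step complete for this Application
    }
    _ = transitions

The remaining Waiting to Pending promotion is handled separately in the next function, gated by appSyncMap and maxUpdate.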
// check Applications that are in Waiting status and promote them to Pending if needed
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusProgress(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appStepMap map[string]int, appMap map[string]argov1alpha1.Application) ([]argov1alpha1.ApplicationSetApplicationStatus, error) {
    now := metav1.Now()

    appStatuses := make([]argov1alpha1.ApplicationSetApplicationStatus, 0, len(applicationSet.Status.ApplicationStatus))

    // if we have no RollingUpdate steps, clear out the existing ApplicationStatus entries
    if applicationSet.Spec.Strategy != nil && applicationSet.Spec.Strategy.Type != "" && applicationSet.Spec.Strategy.Type != "AllAtOnce" {
        updateCountMap := []int{}
        totalCountMap := []int{}

        length := 0
        if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
            length = len(applicationSet.Spec.Strategy.RollingSync.Steps)
        }
        for s := 0; s < length; s++ {
            updateCountMap = append(updateCountMap, 0)
            totalCountMap = append(totalCountMap, 0)
        }

        // populate updateCountMap with counts of existing Pending and Progressing Applications
        for _, appStatus := range applicationSet.Status.ApplicationStatus {
            totalCountMap[appStepMap[appStatus.Application]] += 1

            if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
                if appStatus.Status == "Pending" || appStatus.Status == "Progressing" {
                    updateCountMap[appStepMap[appStatus.Application]] += 1
                }
            }
        }

        for _, appStatus := range applicationSet.Status.ApplicationStatus {

            maxUpdateAllowed := true
            maxUpdate := &intstr.IntOrString{}
            if progressiveSyncsStrategyEnabled(applicationSet, "RollingSync") {
                maxUpdate = applicationSet.Spec.Strategy.RollingSync.Steps[appStepMap[appStatus.Application]].MaxUpdate
            }

            // by default allow all applications to update if maxUpdate is unset
            if maxUpdate != nil {
                maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(maxUpdate, totalCountMap[appStepMap[appStatus.Application]], false)
                if err != nil {
                    log.Warnf("AppSet '%v' has an invalid maxUpdate value '%+v', ignoring maxUpdate logic for this step: %v", applicationSet.Name, maxUpdate, err)
                }

                // ensure that percentage values greater than 0% always result in at least 1 Application being selected
                if maxUpdate.Type == intstr.String && maxUpdate.StrVal != "0%" && maxUpdateVal < 1 {
                    maxUpdateVal = 1
                }

                if updateCountMap[appStepMap[appStatus.Application]] >= maxUpdateVal {
                    maxUpdateAllowed = false
                    log.Infof("Application %v is not allowed to update yet, %v/%v Applications already updating in step %v in AppSet %v", appStatus.Application, updateCountMap[appStepMap[appStatus.Application]], maxUpdateVal, appStepMap[appStatus.Application]+1, applicationSet.Name)
                }

            }

            if appStatus.Status == "Waiting" && appSyncMap[appStatus.Application] && maxUpdateAllowed {
                log.Infof("Application %v moved to Pending status, watching for the Application to start Progressing", appStatus.Application)
                appStatus.LastTransitionTime = &now
                appStatus.Status = "Pending"
                appStatus.Message = "Application moved to Pending status, watching for the Application resource to start Progressing."
                appStatus.Step = fmt.Sprint(appStepMap[appStatus.Application] + 1)

                updateCountMap[appStepMap[appStatus.Application]] += 1
            }

            appStatuses = append(appStatuses, appStatus)
        }
    }

    err := r.setAppSetApplicationStatus(ctx, applicationSet, appStatuses)
    if err != nil {
        return nil, fmt.Errorf("failed to set AppSet app status: %w", err)
    }

    return appStatuses, nil
}
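The maxUpdate handling above delegates the int-or-percentage arithmetic to intstr.GetScaledValueFromIntOrPercent; a small sketch (hypothetical step of 4 Applications, not taken from the repository) of how the scaling interacts with the at-least-one guard:

    // "25%" of a 4-Application step scales (rounding down) to 1; "10%" scales to 0,
    // but the guard above bumps any non-"0%" percentage back up to 1 so the step
    // can still make progress.
    maxUpdate := intstr.FromString("10%")
    maxUpdateVal, err := intstr.GetScaledValueFromIntOrPercent(&maxUpdate, 4, false)
    if err == nil && maxUpdate.Type == intstr.String && maxUpdate.StrVal != "0%" && maxUpdateVal < 1 {
        maxUpdateVal = 1 // mirrors the controller's guard
    }
    _ = maxUpdateVal // 1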
func (r *ApplicationSetReconciler) updateApplicationSetApplicationStatusConditions(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet) ([]argov1alpha1.ApplicationSetCondition, error) {

    appSetProgressing := false
    for _, appStatus := range applicationSet.Status.ApplicationStatus {
        if appStatus.Status != "Healthy" {
            appSetProgressing = true
            break
        }
    }

    appSetConditionProgressing := false
    for _, appSetCondition := range applicationSet.Status.Conditions {
        if appSetCondition.Type == argov1alpha1.ApplicationSetConditionRolloutProgressing && appSetCondition.Status == argov1alpha1.ApplicationSetConditionStatusTrue {
            appSetConditionProgressing = true
            break
        }
    }

    if appSetProgressing && !appSetConditionProgressing {
        _ = r.setApplicationSetStatusCondition(ctx,
            applicationSet,
            argov1alpha1.ApplicationSetCondition{
                Type:    argov1alpha1.ApplicationSetConditionRolloutProgressing,
                Message: "ApplicationSet Rollout started",
                Reason:  argov1alpha1.ApplicationSetReasonApplicationSetModified,
                Status:  argov1alpha1.ApplicationSetConditionStatusTrue,
            }, false,
        )
    } else if !appSetProgressing && appSetConditionProgressing {
        _ = r.setApplicationSetStatusCondition(ctx,
            applicationSet,
            argov1alpha1.ApplicationSetCondition{
                Type:    argov1alpha1.ApplicationSetConditionRolloutProgressing,
                Message: "ApplicationSet Rollout complete",
                Reason:  argov1alpha1.ApplicationSetReasonApplicationSetRolloutComplete,
                Status:  argov1alpha1.ApplicationSetConditionStatusFalse,
            }, false,
        )
    }

    return applicationSet.Status.Conditions, nil
}

func findApplicationStatusIndex(appStatuses []argov1alpha1.ApplicationSetApplicationStatus, application string) int {
    for i := range appStatuses {
        if appStatuses[i].Application == application {
            return i
        }
    }
    return -1
}

// setAppSetApplicationStatus updates the ApplicationSet's status field
// with any new/changed Application statuses.
func (r *ApplicationSetReconciler) setAppSetApplicationStatus(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, applicationStatuses []argov1alpha1.ApplicationSetApplicationStatus) error {
    needToUpdateStatus := false
    for i := range applicationStatuses {
        appStatus := applicationStatuses[i]
        idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, appStatus.Application)
        if idx == -1 {
            needToUpdateStatus = true
            break
        }
        currentStatus := applicationSet.Status.ApplicationStatus[idx]
        if currentStatus.Message != appStatus.Message || currentStatus.Status != appStatus.Status {
            needToUpdateStatus = true
            break
        }
    }

    if needToUpdateStatus {
        // fetch updated Application Set object before updating it
        namespacedName := types.NamespacedName{Namespace: applicationSet.Namespace, Name: applicationSet.Name}
        if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
            if client.IgnoreNotFound(err) != nil {
                return nil
            }
            return fmt.Errorf("error fetching updated application set: %v", err)
        }

        for i := range applicationStatuses {
            applicationSet.Status.SetApplicationStatus(applicationStatuses[i])
        }

        // Update the newly fetched object with new set of ApplicationStatus
        err := r.Client.Status().Update(ctx, applicationSet)
        if err != nil {
            log.Errorf("unable to set application set status: %v", err)
            return fmt.Errorf("unable to set application set status: %v", err)
        }

        if err := r.Get(ctx, namespacedName, applicationSet); err != nil {
            if client.IgnoreNotFound(err) != nil {
                return nil
            }
            return fmt.Errorf("error fetching updated application set: %v", err)
        }
    }

    return nil
}

func (r *ApplicationSetReconciler) syncValidApplications(ctx context.Context, applicationSet *argov1alpha1.ApplicationSet, appSyncMap map[string]bool, appMap map[string]argov1alpha1.Application, validApps []argov1alpha1.Application) ([]argov1alpha1.Application, error) {
    rolloutApps := []argov1alpha1.Application{}
    for i := range validApps {
        pruneEnabled := false

        // ensure that Applications generated with RollingSync do not have an automated sync policy, since the AppSet controller will handle triggering the sync operation instead
        if validApps[i].Spec.SyncPolicy != nil && validApps[i].Spec.SyncPolicy.Automated != nil {
            pruneEnabled = validApps[i].Spec.SyncPolicy.Automated.Prune
            validApps[i].Spec.SyncPolicy.Automated = nil
        }

        appSetStatusPending := false
        idx := findApplicationStatusIndex(applicationSet.Status.ApplicationStatus, validApps[i].Name)
        if idx > -1 && applicationSet.Status.ApplicationStatus[idx].Status == "Pending" {
            // only trigger a sync for Applications that are in Pending status, since this is governed by maxUpdate
            appSetStatusPending = true
        }

        // check appSyncMap to determine which Applications are ready to be updated and which should be skipped
        if appSyncMap[validApps[i].Name] && appMap[validApps[i].Name].Status.Sync.Status == "OutOfSync" && appSetStatusPending {
            log.Infof("triggering sync for application: %v, prune enabled: %v", validApps[i].Name, pruneEnabled)
            validApps[i], _ = syncApplication(validApps[i], pruneEnabled)
        }
        rolloutApps = append(rolloutApps, validApps[i])
    }
    return rolloutApps, nil
}

// used by the RollingSync Progressive Sync strategy to trigger a sync of a particular Application resource
func syncApplication(application argov1alpha1.Application, prune bool) (argov1alpha1.Application, error) {

    operation := argov1alpha1.Operation{
        InitiatedBy: argov1alpha1.OperationInitiator{
            Username:  "applicationset-controller",
            Automated: true,
        },
        Info: []*argov1alpha1.Info{
            {
                Name:  "Reason",
                Value: "ApplicationSet RollingSync triggered a sync of this Application resource.",
            },
        },
        Sync: &argov1alpha1.SyncOperation{},
    }

    if application.Spec.SyncPolicy != nil {
        if application.Spec.SyncPolicy.Retry != nil {
            operation.Retry = *application.Spec.SyncPolicy.Retry
        }
        if application.Spec.SyncPolicy.SyncOptions != nil {
            operation.Sync.SyncOptions = application.Spec.SyncPolicy.SyncOptions
        }
        operation.Sync.Prune = prune
    }
    application.Operation = &operation

    return application, nil
}

var _ handler.EventHandler = &clusterSecretEventHandler{}
File diff suppressed because it is too large
@@ -23,6 +23,8 @@ spec:
  template:
    metadata:
      name: 'myapp-{{ .branch }}-{{ .number }}'
      labels:
        key1: '{{ index .labels 0 }}'
    spec:
      source:
        repoURL: 'https://github.com/myorg/myrepo.git'
@@ -85,6 +85,7 @@ func (g *GitGenerator) generateParamsForGitDirectories(appSetGenerator *argoproj
|
||||
"total": len(allPaths),
|
||||
"repoURL": appSetGenerator.Git.RepoURL,
|
||||
"revision": appSetGenerator.Git.Revision,
|
||||
"pathParamPrefix": appSetGenerator.Git.PathParamPrefix,
|
||||
}).Info("applications result from the repo service")
|
||||
|
||||
requestedApps := g.filterApps(appSetGenerator.Git.Directories, allPaths)
|
||||
@@ -121,7 +122,7 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
|
||||
for _, path := range allPaths {
|
||||
|
||||
// A JSON / YAML file path can contain multiple sets of parameters (ie it is an array)
|
||||
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path], useGoTemplate)
|
||||
paramsArray, err := g.generateParamsFromGitFile(path, allFiles[path], useGoTemplate, appSetGenerator.Git.PathParamPrefix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to process file '%s': %v", path, err)
|
||||
}
|
||||
@@ -133,7 +134,7 @@ func (g *GitGenerator) generateParamsForGitFiles(appSetGenerator *argoprojiov1al
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, useGoTemplate bool) ([]map[string]interface{}, error) {
|
||||
func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []byte, useGoTemplate bool, pathParamPrefix string) ([]map[string]interface{}, error) {
|
||||
objectsFound := []map[string]interface{}{}
|
||||
|
||||
// First, we attempt to parse as an array
|
||||
@@ -167,7 +168,11 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
paramPath["basenameNormalized"] = utils.SanitizeName(path.Base(paramPath["path"].(string)))
|
||||
paramPath["filenameNormalized"] = utils.SanitizeName(path.Base(paramPath["filename"].(string)))
|
||||
paramPath["segments"] = strings.Split(paramPath["path"].(string), "/")
|
||||
params["path"] = paramPath
|
||||
if pathParamPrefix != "" {
|
||||
params[pathParamPrefix] = map[string]interface{}{"path": paramPath}
|
||||
} else {
|
||||
params["path"] = paramPath
|
||||
}
|
||||
} else {
|
||||
flat, err := flatten.Flatten(objectFound, "", flatten.DotStyle)
|
||||
if err != nil {
|
||||
@@ -176,14 +181,18 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
for k, v := range flat {
|
||||
params[k] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
params["path"] = path.Dir(filePath)
|
||||
params["path.basename"] = path.Base(params["path"].(string))
|
||||
params["path.filename"] = path.Base(filePath)
|
||||
params["path.basenameNormalized"] = utils.SanitizeName(path.Base(params["path"].(string)))
|
||||
params["path.filenameNormalized"] = utils.SanitizeName(path.Base(params["path.filename"].(string)))
|
||||
for k, v := range strings.Split(params["path"].(string), "/") {
|
||||
pathParamName := "path"
|
||||
if pathParamPrefix != "" {
|
||||
pathParamName = pathParamPrefix+"."+pathParamName
|
||||
}
|
||||
params[pathParamName] = path.Dir(filePath)
|
||||
params[pathParamName+".basename"] = path.Base(params[pathParamName].(string))
|
||||
params[pathParamName+".filename"] = path.Base(filePath)
|
||||
params[pathParamName+".basenameNormalized"] = utils.SanitizeName(path.Base(params[pathParamName].(string)))
|
||||
params[pathParamName+".filenameNormalized"] = utils.SanitizeName(path.Base(params[pathParamName+".filename"].(string)))
|
||||
for k, v := range strings.Split(params[pathParamName].(string), "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
params[pathParamName+"["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -192,7 +201,6 @@ func (g *GitGenerator) generateParamsFromGitFile(filePath string, fileContent []
|
||||
}
|
||||
|
||||
return res, nil
|
||||
|
||||
}
|
||||
|
||||
func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryGeneratorItem, allPaths []string) []string {
|
||||
@@ -223,9 +231,7 @@ func (g *GitGenerator) filterApps(Directories []argoprojiov1alpha1.GitDirectoryG
|
||||
return res
|
||||
}
|
||||
|
||||
func (g *GitGenerator) generateParamsFromApps(requestedApps []string, _ *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool) []map[string]interface{} {
|
||||
// TODO: At some point, the applicationSetGenerator param should be used
|
||||
|
||||
func (g *GitGenerator) generateParamsFromApps(requestedApps []string, appSetGenerator *argoprojiov1alpha1.ApplicationSetGenerator, useGoTemplate bool) []map[string]interface{} {
|
||||
res := make([]map[string]interface{}, len(requestedApps))
|
||||
for i, a := range requestedApps {
|
||||
|
||||
@@ -237,14 +243,22 @@ func (g *GitGenerator) generateParamsFromApps(requestedApps []string, _ *argopro
|
||||
paramPath["basename"] = path.Base(a)
|
||||
paramPath["basenameNormalized"] = utils.SanitizeName(path.Base(a))
|
||||
paramPath["segments"] = strings.Split(paramPath["path"].(string), "/")
|
||||
params["path"] = paramPath
|
||||
if appSetGenerator.Git.PathParamPrefix != "" {
|
||||
params[appSetGenerator.Git.PathParamPrefix] = map[string]interface{}{"path": paramPath}
|
||||
} else {
|
||||
params["path"] = paramPath
|
||||
}
|
||||
} else {
|
||||
params["path"] = a
|
||||
params["path.basename"] = path.Base(a)
|
||||
params["path.basenameNormalized"] = utils.SanitizeName(path.Base(a))
|
||||
for k, v := range strings.Split(params["path"].(string), "/") {
|
||||
pathParamName := "path"
|
||||
if appSetGenerator.Git.PathParamPrefix != "" {
|
||||
pathParamName = appSetGenerator.Git.PathParamPrefix+"."+pathParamName
|
||||
}
|
||||
params[pathParamName] = a
|
||||
params[pathParamName+".basename"] = path.Base(a)
|
||||
params[pathParamName+".basenameNormalized"] = utils.SanitizeName(path.Base(a))
|
||||
for k, v := range strings.Split(params[pathParamName].(string), "/") {
|
||||
if len(v) > 0 {
|
||||
params["path["+strconv.Itoa(k)+"]"] = v
|
||||
params[pathParamName+"["+strconv.Itoa(k)+"]"] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,7 +51,7 @@ func Test_generateParamsFromGitFile(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`), false)
|
||||
`), false, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -69,11 +69,33 @@ foo:
|
||||
}, params)
|
||||
}
|
||||
|
||||
func Test_generatePrefixedParamsFromGitFile(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`), false, "myRepo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, []map[string]interface{}{
|
||||
{
|
||||
"foo.bar": "baz",
|
||||
"myRepo.path": "path/dir",
|
||||
"myRepo.path.basename": "dir",
|
||||
"myRepo.path.filename": "file_name.yaml",
|
||||
"myRepo.path.basenameNormalized": "dir",
|
||||
"myRepo.path.filenameNormalized": "file-name.yaml",
|
||||
"myRepo.path[0]": "path",
|
||||
"myRepo.path[1]": "dir",
|
||||
},
|
||||
}, params)
|
||||
}
|
||||
|
||||
func Test_generateParamsFromGitFileGoTemplate(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`), true)
|
||||
`), true, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -97,15 +119,46 @@ foo:
|
||||
}, params)
|
||||
}
|
||||
|
||||
func Test_generatePrefixedParamsFromGitFileGoTemplate(t *testing.T) {
|
||||
params, err := (*GitGenerator)(nil).generateParamsFromGitFile("path/dir/file_name.yaml", []byte(`
|
||||
foo:
|
||||
bar: baz
|
||||
`), true, "myRepo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, []map[string]interface{}{
|
||||
{
|
||||
"foo": map[string]interface{}{
|
||||
"bar": "baz",
|
||||
},
|
||||
"myRepo": map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "path/dir",
|
||||
"basename": "dir",
|
||||
"filename": "file_name.yaml",
|
||||
"basenameNormalized": "dir",
|
||||
"filenameNormalized": "file-name.yaml",
|
||||
"segments": []string{
|
||||
"path",
|
||||
"dir",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, params)
|
||||
}
|
||||
|
||||
func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
pathParamPrefix string
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow - created apps",
|
||||
@@ -124,6 +177,24 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It prefixes path parameters with PathParamPrefix",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
pathParamPrefix: "myRepo",
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"app_3",
|
||||
"p1/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]interface{}{
|
||||
{"myRepo.path": "app1", "myRepo.path.basename": "app1", "myRepo.path.basenameNormalized": "app1", "myRepo.path[0]": "app1"},
|
||||
{"myRepo.path": "app2", "myRepo.path.basename": "app2", "myRepo.path.basenameNormalized": "app2", "myRepo.path[0]": "app2"},
|
||||
{"myRepo.path": "app_3", "myRepo.path.basename": "app_3", "myRepo.path.basenameNormalized": "app-3", "myRepo.path[0]": "app_3"},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
|
||||
@@ -212,9 +283,10 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
PathParamPrefix: testCaseCopy.pathParamPrefix,
|
||||
},
|
||||
}},
|
||||
},
|
||||
@@ -237,12 +309,13 @@ func TestGitGenerateParamsFromDirectories(t *testing.T) {
|
||||
func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
name string
|
||||
directories []argoprojiov1alpha1.GitDirectoryGeneratorItem
|
||||
pathParamPrefix string
|
||||
repoApps []string
|
||||
repoError error
|
||||
expected []map[string]interface{}
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "happy flow - created apps",
|
||||
@@ -288,6 +361,57 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It prefixes path parameters with PathParamPrefix",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "*"}},
|
||||
pathParamPrefix: "myRepo",
|
||||
repoApps: []string{
|
||||
"app1",
|
||||
"app2",
|
||||
"app_3",
|
||||
"p1/app4",
|
||||
},
|
||||
repoError: nil,
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"myRepo": map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "app1",
|
||||
"basename": "app1",
|
||||
"basenameNormalized": "app1",
|
||||
"segments": []string{
|
||||
"app1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"myRepo": map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "app2",
|
||||
"basename": "app2",
|
||||
"basenameNormalized": "app2",
|
||||
"segments": []string{
|
||||
"app2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"myRepo": map[string]interface{}{
|
||||
"path": map[string]interface{}{
|
||||
"path": "app_3",
|
||||
"basename": "app_3",
|
||||
"basenameNormalized": "app-3",
|
||||
"segments": []string{
|
||||
"app_3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "It filters application according to the paths",
|
||||
directories: []argoprojiov1alpha1.GitDirectoryGeneratorItem{{Path: "p1/*"}, {Path: "p1/*/*"}},
|
||||
@@ -455,9 +579,10 @@ func TestGitGenerateParamsFromDirectoriesGoTemplate(t *testing.T) {
|
||||
GoTemplate: true,
|
||||
Generators: []argoprojiov1alpha1.ApplicationSetGenerator{{
|
||||
Git: &argoprojiov1alpha1.GitGenerator{
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
RepoURL: "RepoURL",
|
||||
Revision: "Revision",
|
||||
Directories: testCaseCopy.directories,
|
||||
PathParamPrefix: testCaseCopy.pathParamPrefix,
|
||||
},
|
||||
}},
|
||||
},
|
||||
|
||||
@@ -144,9 +144,10 @@ func (m *MatrixGenerator) GetRequeueAfter(appSetGenerator *argoprojiov1alpha1.Ap
|
||||
|
||||
for _, r := range appSetGenerator.Matrix.Generators {
|
||||
base := &argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
List: r.List,
|
||||
Clusters: r.Clusters,
|
||||
Git: r.Git,
|
||||
PullRequest: r.PullRequest,
|
||||
}
|
||||
generators := GetRelevantGenerators(base, m.supportedGenerators)
|
||||
|
||||
|
||||
@@ -399,6 +399,8 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
Elements: []apiextensionsv1.JSON{{Raw: []byte(`{"cluster": "Cluster","url": "Url"}`)}},
|
||||
}
|
||||
|
||||
pullRequestGenerator := &argoprojiov1alpha1.PullRequestGenerator{}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
baseGenerators []argoprojiov1alpha1.ApplicationSetNestedGenerator
|
||||
@@ -431,6 +433,31 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
gitGetRequeueAfter: time.Duration(1),
|
||||
expected: time.Duration(1),
|
||||
},
|
||||
{
|
||||
name: "returns the minimal time for pull request",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
PullRequest: pullRequestGenerator,
|
||||
},
|
||||
},
|
||||
gitGetRequeueAfter: time.Duration(15 * time.Second),
|
||||
expected: time.Duration(15 * time.Second),
|
||||
},
|
||||
{
|
||||
name: "returns the default time if no requeueAfterSeconds is provided",
|
||||
baseGenerators: []argoprojiov1alpha1.ApplicationSetNestedGenerator{
|
||||
{
|
||||
Git: gitGenerator,
|
||||
},
|
||||
{
|
||||
PullRequest: pullRequestGenerator,
|
||||
},
|
||||
},
|
||||
expected: time.Duration(30 * time.Minute),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -441,16 +468,18 @@ func TestMatrixGetRequeueAfter(t *testing.T) {
|
||||
|
||||
for _, g := range testCaseCopy.baseGenerators {
|
||||
gitGeneratorSpec := argoprojiov1alpha1.ApplicationSetGenerator{
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
Git: g.Git,
|
||||
List: g.List,
|
||||
PullRequest: g.PullRequest,
|
||||
}
|
||||
mock.On("GetRequeueAfter", &gitGeneratorSpec).Return(testCaseCopy.gitGetRequeueAfter, nil)
|
||||
}
|
||||
|
||||
var matrixGenerator = NewMatrixGenerator(
|
||||
map[string]Generator{
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"Git": mock,
|
||||
"List": &ListGenerator{},
|
||||
"PullRequest": &PullRequestGenerator{},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -90,13 +90,19 @@ func (g *PullRequestGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
shortSHALength = len(pull.HeadSHA)
|
||||
}
|
||||
|
||||
params = append(params, map[string]interface{}{
|
||||
paramMap := map[string]interface{}{
|
||||
"number": strconv.Itoa(pull.Number),
|
||||
"branch": pull.Branch,
|
||||
"branch_slug": slug.Make(pull.Branch),
|
||||
"head_sha": pull.HeadSHA,
|
||||
"head_short_sha": pull.HeadSHA[:shortSHALength],
|
||||
})
|
||||
}
|
||||
|
||||
// PR labels will only be supported for Go Template appsets, since fasttemplate will be deprecated.
|
||||
if applicationSetInfo != nil && applicationSetInfo.Spec.GoTemplate {
|
||||
paramMap["labels"] = pull.Labels
|
||||
}
|
||||
params = append(params, paramMap)
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
@@ -17,9 +17,10 @@ import (
|
||||
func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cases := []struct {
|
||||
selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
|
||||
expected []map[string]interface{}
|
||||
expectedErr error
|
||||
selectFunc func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error)
|
||||
expected []map[string]interface{}
|
||||
expectedErr error
|
||||
applicationSet argoprojiov1alpha1.ApplicationSet
|
||||
}{
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
@@ -107,6 +108,71 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
expected: nil,
|
||||
expectedErr: fmt.Errorf("error listing repos: fake error"),
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Labels: []string{"preview"},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "1",
|
||||
"branch": "branch1",
|
||||
"branch_slug": "branch1",
|
||||
"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
"head_short_sha": "089d92cb",
|
||||
"labels": []string{"preview"},
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
applicationSet: argoprojiov1alpha1.ApplicationSet{
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
// Application set is using Go Template.
|
||||
GoTemplate: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
selectFunc: func(context.Context, *argoprojiov1alpha1.PullRequestGenerator, *argoprojiov1alpha1.ApplicationSet) (pullrequest.PullRequestService, error) {
|
||||
return pullrequest.NewFakeService(
|
||||
ctx,
|
||||
[]*pullrequest.PullRequest{
|
||||
&pullrequest.PullRequest{
|
||||
Number: 1,
|
||||
Branch: "branch1",
|
||||
HeadSHA: "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
Labels: []string{"preview"},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
)
|
||||
},
|
||||
expected: []map[string]interface{}{
|
||||
{
|
||||
"number": "1",
|
||||
"branch": "branch1",
|
||||
"branch_slug": "branch1",
|
||||
"head_sha": "089d92cbf9ff857a39e6feccd32798ca700fb958",
|
||||
"head_short_sha": "089d92cb",
|
||||
},
|
||||
},
|
||||
expectedErr: nil,
|
||||
applicationSet: argoprojiov1alpha1.ApplicationSet{
|
||||
Spec: argoprojiov1alpha1.ApplicationSetSpec{
|
||||
// Application set is using fasttemplate.
|
||||
GoTemplate: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
@@ -117,7 +183,7 @@ func TestPullRequestGithubGenerateParams(t *testing.T) {
|
||||
PullRequest: &argoprojiov1alpha1.PullRequestGenerator{},
|
||||
}
|
||||
|
||||
got, gotErr := gen.GenerateParams(&generatorConfig, nil)
|
||||
got, gotErr := gen.GenerateParams(&generatorConfig, &c.applicationSet)
|
||||
assert.Equal(t, c.expectedErr, gotErr)
|
||||
assert.ElementsMatch(t, c.expected, got)
|
||||
}
|
||||
|
||||
@@ -122,6 +122,15 @@ func (g *SCMProviderGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing Azure Devops service: %v", err)
|
||||
}
|
||||
} else if providerConfig.Bitbucket != nil {
|
||||
appPassword, err := g.getSecretRef(ctx, providerConfig.Bitbucket.AppPasswordRef, applicationSetInfo.Namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching Bitbucket cloud appPassword: %v", err)
|
||||
}
|
||||
provider, err = scm_provider.NewBitBucketCloudProvider(ctx, providerConfig.Bitbucket.Owner, providerConfig.Bitbucket.User, appPassword, providerConfig.Bitbucket.AllBranches)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing Bitbucket cloud service: %v", err)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("no SCM provider implementation configured")
|
||||
}
|
||||
|
||||
@@ -20,10 +20,12 @@ func Client(g github_app_auth.Authentication, url string) (*github.Client, error
|
||||
url = g.EnterpriseBaseURL
|
||||
}
|
||||
var client *github.Client
|
||||
httpClient := http.Client{Transport: rt}
|
||||
if url == "" {
|
||||
httpClient := http.Client{Transport: rt}
|
||||
client = github.NewClient(&httpClient)
|
||||
} else {
|
||||
rt.BaseURL = url
|
||||
httpClient := http.Client{Transport: rt}
|
||||
client, err = github.NewEnterpriseClient(url, url, &httpClient)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create github enterprise client: %w", err)
|
||||
|
||||
@@ -69,6 +69,7 @@ func (b *BitbucketService) List(_ context.Context) ([]*PullRequest, error) {
|
||||
Number: pull.ID,
|
||||
Branch: pull.FromRef.DisplayID, // ID: refs/heads/main DisplayID: main
|
||||
HeadSHA: pull.FromRef.LatestCommit, // This is not defined in the official docs, but works in practice
|
||||
Labels: []string{}, // Not supported by library
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -122,16 +122,19 @@ func TestListPullRequestPagination(t *testing.T) {
|
||||
Number: 101,
|
||||
Branch: "feature-101",
|
||||
HeadSHA: "ab3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[0])
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 102,
|
||||
Branch: "feature-102",
|
||||
HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[1])
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 200,
|
||||
Branch: "feature-200",
|
||||
HeadSHA: "cb3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[2])
|
||||
}
|
||||
|
||||
@@ -284,11 +287,13 @@ func TestListPullRequestBranchMatch(t *testing.T) {
|
||||
Number: 101,
|
||||
Branch: "feature-101",
|
||||
HeadSHA: "ab3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[0])
|
||||
assert.Equal(t, PullRequest{
|
||||
Number: 102,
|
||||
Branch: "feature-102",
|
||||
HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[1])
|
||||
|
||||
regexp = `.*2$`
|
||||
@@ -305,6 +310,7 @@ func TestListPullRequestBranchMatch(t *testing.T) {
|
||||
Number: 102,
|
||||
Branch: "feature-102",
|
||||
HeadSHA: "bb3cf2e4d1517c83e720d2585b9402dbef71f992",
|
||||
Labels: []string{},
|
||||
}, *pullRequests[0])
|
||||
|
||||
regexp = `[\d{2}`
|
||||
|
||||
@@ -57,7 +57,17 @@ func (g *GiteaService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
Number: int(pr.Index),
|
||||
Branch: pr.Head.Ref,
|
||||
HeadSHA: pr.Head.Sha,
|
||||
Labels: getGiteaPRLabelNames(pr.Labels),
|
||||
})
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// Get the Gitea pull request label names.
|
||||
func getGiteaPRLabelNames(giteaLabels []*gitea.Label) []string {
|
||||
var labelNames []string
|
||||
for _, giteaLabel := range giteaLabels {
|
||||
labelNames = append(labelNames, giteaLabel.Name)
|
||||
}
|
||||
return labelNames
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/sdk/gitea"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -257,3 +258,32 @@ func TestGiteaList(t *testing.T) {
|
||||
assert.Equal(t, prs[0].Branch, "test")
|
||||
assert.Equal(t, prs[0].HeadSHA, "7bbaf62d92ddfafd9cc8b340c619abaec32bc09f")
|
||||
}
|
||||
|
||||
func TestGetGiteaPRLabelNames(t *testing.T) {
|
||||
Tests := []struct {
|
||||
Name string
|
||||
PullLabels []*gitea.Label
|
||||
ExpectedResult []string
|
||||
}{
|
||||
{
|
||||
Name: "PR has labels",
|
||||
PullLabels: []*gitea.Label{
|
||||
&gitea.Label{Name: "label1"},
|
||||
&gitea.Label{Name: "label2"},
|
||||
&gitea.Label{Name: "label3"},
|
||||
},
|
||||
ExpectedResult: []string{"label1", "label2", "label3"},
|
||||
},
|
||||
{
|
||||
Name: "PR does not have labels",
|
||||
PullLabels: []*gitea.Label{},
|
||||
ExpectedResult: nil,
|
||||
},
|
||||
}
|
||||
for _, test := range Tests {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
labels := getGiteaPRLabelNames(test.PullLabels)
|
||||
assert.Equal(t, test.ExpectedResult, labels)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,6 +68,7 @@ func (g *GithubService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
Number: *pull.Number,
|
||||
Branch: *pull.Head.Ref,
|
||||
HeadSHA: *pull.Head.SHA,
|
||||
Labels: getGithubPRLabelNames(pull.Labels),
|
||||
})
|
||||
}
|
||||
if resp.NextPage == 0 {
|
||||
@@ -97,3 +98,12 @@ func containLabels(expectedLabels []string, gotLabels []*github.Label) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Get the Github pull request label names.
|
||||
func getGithubPRLabelNames(gitHubLabels []*github.Label) []string {
|
||||
var labelNames []string
|
||||
for _, gitHubLabel := range gitHubLabels {
|
||||
labelNames = append(labelNames, *gitHubLabel.Name)
|
||||
}
|
||||
return labelNames
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-github/v35/github"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func toPtr(s string) *string {
|
||||
@@ -57,3 +58,32 @@ func TestContainLabels(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetGitHubPRLabelNames(t *testing.T) {
|
||||
Tests := []struct {
|
||||
Name string
|
||||
PullLabels []*github.Label
|
||||
ExpectedResult []string
|
||||
}{
|
||||
{
|
||||
Name: "PR has labels",
|
||||
PullLabels: []*github.Label{
|
||||
&github.Label{Name: toPtr("label1")},
|
||||
&github.Label{Name: toPtr("label2")},
|
||||
&github.Label{Name: toPtr("label3")},
|
||||
},
|
||||
ExpectedResult: []string{"label1", "label2", "label3"},
|
||||
},
|
||||
{
|
||||
Name: "PR does not have labels",
|
||||
PullLabels: []*github.Label{},
|
||||
ExpectedResult: nil,
|
||||
},
|
||||
}
|
||||
for _, test := range Tests {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
labels := getGithubPRLabelNames(test.PullLabels)
|
||||
assert.Equal(t, test.ExpectedResult, labels)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,6 +72,7 @@ func (g *GitLabService) List(ctx context.Context) ([]*PullRequest, error) {
|
||||
Number: mr.IID,
|
||||
Branch: mr.SourceBranch,
|
||||
HeadSHA: mr.SHA,
|
||||
Labels: mr.Labels,
|
||||
})
|
||||
}
|
||||
if resp.NextPage == 0 {
|
||||
|
||||
@@ -12,6 +12,8 @@ type PullRequest struct {
|
||||
Branch string
|
||||
// HeadSHA is the SHA of the HEAD from which the pull request originated.
|
||||
HeadSHA string
|
||||
// Labels of the pull request.
|
||||
Labels []string
|
||||
}
|
||||
|
||||
type PullRequestService interface {
|
||||
|
||||
@@ -11,6 +11,7 @@ var Policies = map[string]Policy{
|
||||
"sync": &SyncPolicy{},
|
||||
"create-only": &CreateOnlyPolicy{},
|
||||
"create-update": &CreateUpdatePolicy{},
|
||||
"create-delete": &CreateDeletePolicy{},
|
||||
}
|
||||
|
||||
type SyncPolicy struct{}
|
||||
@@ -42,3 +43,13 @@ func (p *CreateOnlyPolicy) Update() bool {
|
||||
func (p *CreateOnlyPolicy) Delete() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type CreateDeletePolicy struct{}
|
||||
|
||||
func (p *CreateDeletePolicy) Update() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *CreateDeletePolicy) Delete() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"text/template"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Masterminds/sprig"
|
||||
"github.com/Masterminds/sprig/v3"
|
||||
"github.com/valyala/fasttemplate"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -133,6 +133,16 @@ func (r *Render) deeplyReplace(copy, original reflect.Value, replaceMap map[stri
|
||||
if err := r.deeplyReplace(copyValue, originalValue, replaceMap, useGoTemplate); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Keys can be templated as well as values (e.g. to template something into an annotation).
|
||||
if key.Kind() == reflect.String {
|
||||
templatedKey, err := r.Replace(key.String(), replaceMap, useGoTemplate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key = reflect.ValueOf(templatedKey)
|
||||
}
|
||||
|
||||
copy.SetMapIndex(key, copyValue)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
logtest "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -40,7 +41,7 @@ func TestRenderTemplateParams(t *testing.T) {
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: argoappsv1.ApplicationSpec{
|
||||
Source: argoappsv1.ApplicationSource{
|
||||
Source: &argoappsv1.ApplicationSource{
|
||||
Path: "",
|
||||
RepoURL: "",
|
||||
TargetRevision: "",
|
||||
@@ -219,7 +220,7 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: argoappsv1.ApplicationSpec{
|
||||
Source: argoappsv1.ApplicationSource{
|
||||
Source: &argoappsv1.ApplicationSource{
|
||||
Path: "",
|
||||
RepoURL: "",
|
||||
TargetRevision: "",
|
||||
@@ -461,14 +462,56 @@ func TestRenderTemplateParamsGoTemplate(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRenderTemplateKeys(t *testing.T) {
|
||||
t.Run("fasttemplate", func(t *testing.T) {
|
||||
application := &argoappsv1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
"annotation-{{key}}": "annotation-{{value}}",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
params := map[string]interface{}{
|
||||
"key": "some-key",
|
||||
"value": "some-value",
|
||||
}
|
||||
|
||||
render := Render{}
|
||||
newApplication, err := render.RenderTemplateParams(application, nil, params, false)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, newApplication.ObjectMeta.Annotations, "annotation-some-key")
|
||||
assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-some-key"], "annotation-some-value")
|
||||
})
|
||||
t.Run("gotemplate", func(t *testing.T) {
|
||||
application := &argoappsv1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
"annotation-{{ .key }}": "annotation-{{ .value }}",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
params := map[string]interface{}{
|
||||
"key": "some-key",
|
||||
"value": "some-value",
|
||||
}
|
||||
|
||||
render := Render{}
|
||||
newApplication, err := render.RenderTemplateParams(application, nil, params, true)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, newApplication.ObjectMeta.Annotations, "annotation-some-key")
|
||||
assert.Equal(t, newApplication.ObjectMeta.Annotations["annotation-some-key"], "annotation-some-value")
|
||||
})
|
||||
}
|
||||
|
||||
func TestRenderTemplateParamsFinalizers(t *testing.T) {
|
||||
|
||||
emptyApplication := &argoappsv1.Application{
|
||||
Spec: argoappsv1.ApplicationSpec{
|
||||
Source: argoappsv1.ApplicationSource{
|
||||
Source: &argoappsv1.ApplicationSource{
|
||||
Path: "",
|
||||
RepoURL: "",
|
||||
TargetRevision: "",
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEesHEB7vX5Y2RxXypjMy1nI1z7iRG
|
||||
JI9/gt/sYqzpsa65aaNP4npM43DDxoIy/MQBo9s/mxGxmA+8UXeDpVC9vw==
|
||||
-----END PUBLIC KEY-----
|
||||
@@ -735,6 +735,42 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/links": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "ListLinks returns the list of all application deep links",
|
||||
"operationId": "ApplicationService_ListLinks",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "namespace",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationLinksResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/logs": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -1385,6 +1421,67 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/resource/links": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ApplicationService"
|
||||
],
|
||||
"summary": "ListResourceLinks returns the list of all resource deep links",
|
||||
"operationId": "ApplicationService_ListResourceLinks",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "namespace",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "resourceName",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "version",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "group",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "kind",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"name": "appNamespace",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationLinksResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/applications/{name}/revisions/{revision}/metadata": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -2560,6 +2657,37 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/projects/{name}/links": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"ProjectService"
|
||||
],
|
||||
"summary": "ListLinks returns all deep links for the particular project",
|
||||
"operationId": "ProjectService_ListLinks",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/applicationLinksResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/projects/{name}/syncwindows": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -3296,6 +3424,18 @@
|
||||
"description": "Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity.",
|
||||
"name": "project",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Google Cloud Platform service account key.",
|
||||
"name": "gcpServiceAccountKey",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"description": "Whether to force HTTP basic auth.",
|
||||
"name": "forceHttpBasicAuth",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -3454,6 +3594,29 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/settings/plugins": {
|
||||
"get": {
|
||||
"tags": [
|
||||
"SettingsService"
|
||||
],
|
||||
"summary": "Get returns Argo CD plugins",
|
||||
"operationId": "SettingsService_GetPlugins",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/clusterSettingsPluginsResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/runtimeError"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
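Like the two links endpoints, GetPlugins is a token-authenticated GET with no parameters; its response schema, clusterSettingsPluginsResponse, is defined further down and wraps a list of clusterPlugin entries.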
"/api/v1/stream/applications": {
|
||||
"get": {
|
||||
"tags": [
|
||||
@@ -3900,6 +4063,34 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationLinkInfo": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"iconClass": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationLinksResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/applicationLinkInfo"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"applicationLogEntry": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -4097,6 +4288,9 @@
|
||||
"appLabelKey": {
|
||||
"type": "string"
|
||||
},
|
||||
"appsInAnyNamespaceEnabled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"configManagementPlugins": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -4177,6 +4371,17 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"clusterSettingsPluginsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"plugins": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/clusterPlugin"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"gpgkeyGnuPGPublicKeyCreateResponse": {
|
||||
"type": "object",
|
||||
"title": "Response to a public key creation request",
|
||||
@@ -4197,6 +4402,24 @@
|
||||
"type": "object",
|
||||
"title": "Generic (empty) response for GPG public key CRUD requests"
|
||||
},
|
||||
"intstrIntOrString": {
|
||||
"description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true",
|
||||
"type": "object",
|
||||
"title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString",
|
||||
"properties": {
|
||||
"intVal": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"strVal": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": {
|
||||
"type": "string",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
},
|
||||
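intstrIntOrString mirrors the Kubernetes IntOrString type, which the RollingSync step definition below uses for maxUpdate. A minimal sketch of its marshalling behaviour, assuming the standard k8s.io/apimachinery module is on the module path:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// maxUpdate-style values: an absolute count or a percentage string.
	asInt := intstr.FromInt(2)
	asString := intstr.FromString("25%")

	// As described above, IntOrString marshals to its inner type.
	a, _ := json.Marshal(asInt)    // 2
	b, _ := json.Marshal(asString) // "25%"
	fmt.Println(string(a), string(b))
}
```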
"notificationService": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -4505,6 +4728,65 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"repositoryParameterAnnouncement": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"array": {
|
||||
"description": "array is the default value of the parameter if the parameter is an array.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"collectionType": {
|
||||
"description": "collectionType is the type of value this parameter holds - either a single value (a string) or a collection\n(array or map). If collectionType is set, only the field with that type will be used. If collectionType is not\nset, `string` is the default. If collectionType is set to an invalid value, a validation error is thrown.",
|
||||
"type": "string"
|
||||
},
|
||||
"itemType": {
|
||||
"description": "itemType determines the primitive data type represented by the parameter. Parameters are always encoded as\nstrings, but this field lets them be interpreted as other primitive types.",
|
||||
"type": "string"
|
||||
},
|
||||
"map": {
|
||||
"description": "map is the default value of the parameter if the parameter is a map.",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "name is the name identifying a parameter.",
|
||||
"type": "string"
|
||||
},
|
||||
"required": {
|
||||
"description": "required defines if this given parameter is mandatory.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"string": {
|
||||
"description": "string is the default value of the parameter if the parameter is a string.",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
"description": "title is a human-readable text of the parameter name.",
|
||||
"type": "string"
|
||||
},
|
||||
"tooltip": {
|
||||
"description": "tooltip is a human-readable description of the parameter.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"repositoryPluginAppSpec": {
|
||||
"type": "object",
|
||||
"title": "PluginAppSpec contains details about a plugin-type Application",
|
||||
"properties": {
|
||||
"parametersAnnouncement": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repositoryParameterAnnouncement"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"repositoryRefs": {
|
||||
"type": "object",
|
||||
"title": "A subset of the repository's named refs",
|
||||
@@ -4551,6 +4833,9 @@
|
||||
"kustomize": {
|
||||
"$ref": "#/definitions/repositoryKustomizeAppSpec"
|
||||
},
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/repositoryPluginAppSpec"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
@@ -5373,6 +5658,23 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationMatchExpression": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"key": {
|
||||
"type": "string"
|
||||
},
|
||||
"operator": {
|
||||
"type": "string"
|
||||
},
|
||||
"values": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSet": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSet is a set of Application resources\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status",
|
||||
@@ -5388,6 +5690,31 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetApplicationStatus": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSetApplicationStatus contains details about each Application managed by the ApplicationSet",
|
||||
"properties": {
|
||||
"application": {
|
||||
"type": "string",
|
||||
"title": "Application contains the name of the Application resource"
|
||||
},
|
||||
"lastTransitionTime": {
|
||||
"$ref": "#/definitions/v1Time"
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"title": "Message contains human-readable message indicating details about the status"
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)"
|
||||
},
|
||||
"step": {
|
||||
"type": "string",
|
||||
"title": "Step tracks which step this Application should be updated in"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetCondition": {
|
||||
"type": "object",
|
||||
"title": "ApplicationSetCondition contains details about an applicationset condition, which is usally an error or warning",
|
||||
@@ -5494,6 +5821,31 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetRolloutStep": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"matchExpressions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationMatchExpression"
|
||||
}
|
||||
},
|
||||
"maxUpdate": {
|
||||
"$ref": "#/definitions/intstrIntOrString"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetRolloutStrategy": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"steps": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetRolloutStep"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetSpec": {
|
||||
"description": "ApplicationSetSpec represents a class of application set state.",
|
||||
"type": "object",
|
||||
@@ -5507,6 +5859,9 @@
|
||||
"goTemplate": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"strategy": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetStrategy"
|
||||
},
|
||||
"syncPolicy": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetSyncPolicy"
|
||||
},
|
||||
@@ -5519,6 +5874,12 @@
|
||||
"type": "object",
|
||||
"title": "ApplicationSetStatus defines the observed state of ApplicationSet",
|
||||
"properties": {
|
||||
"applicationStatus": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetApplicationStatus"
|
||||
}
|
||||
},
|
||||
"conditions": {
|
||||
"type": "array",
|
||||
"title": "INSERT ADDITIONAL STATUS FIELD - define observed state of cluster\nImportant: Run \"make\" to regenerate code after modifying this file",
|
||||
@@ -5528,6 +5889,18 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSetStrategy": {
|
||||
"description": "ApplicationSetStrategy configures how generated Applications are updated in sequence.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"rollingSync": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSetRolloutStrategy"
|
||||
},
|
||||
"type": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
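Putting the new definitions together: a RollingSync strategy is a list of steps, each selecting applications by match expressions and capping concurrent updates with maxUpdate. The sketch below uses local structs that mirror the swagger field names; it is illustrative only, not the upstream Go API, and the "RollingSync" type name and "In" operator value are assumptions following the usual conventions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the definitions above, for illustration only.
type matchExpression struct {
	Key      string   `json:"key"`
	Operator string   `json:"operator"`
	Values   []string `json:"values"`
}

type rolloutStep struct {
	MatchExpressions []matchExpression `json:"matchExpressions"`
	MaxUpdate        interface{}       `json:"maxUpdate,omitempty"` // int or string, per intstrIntOrString
}

type rolloutStrategy struct {
	Steps []rolloutStep `json:"steps"`
}

type appSetStrategy struct {
	Type        string          `json:"type"`
	RollingSync rolloutStrategy `json:"rollingSync"`
}

func main() {
	// Roll out "dev" applications first, then at most 25% of "prod" applications at a time.
	s := appSetStrategy{
		Type: "RollingSync", // strategy name assumed from the rollingSync field above
		RollingSync: rolloutStrategy{
			Steps: []rolloutStep{
				{MatchExpressions: []matchExpression{{Key: "env", Operator: "In", Values: []string{"dev"}}}},
				{
					MatchExpressions: []matchExpression{{Key: "env", Operator: "In", Values: []string{"prod"}}},
					MaxUpdate:        "25%",
				},
			},
		},
	}
	out, _ := json.MarshalIndent(s, "", "  ")
	fmt.Println(string(out))
}
```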
"v1alpha1ApplicationSetSyncPolicy": {
|
||||
"description": "ApplicationSetSyncPolicy configures how generated Applications will relate to their\nApplicationSet.",
|
||||
"type": "object",
|
||||
@@ -5604,6 +5977,10 @@
|
||||
"plugin": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourcePlugin"
|
||||
},
|
||||
"ref": {
|
||||
"description": "Ref is reference to another source within sources field. This field will not be used if used with a `source` tag.",
|
||||
"type": "string"
|
||||
},
|
||||
"repoURL": {
|
||||
"type": "string",
|
||||
"title": "RepoURL is the URL to the repository (Git or Helm) that contains the application manifests"
|
||||
@@ -5772,6 +6149,39 @@
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"parameters": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSourcePluginParameter"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ApplicationSourcePluginParameter": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"array": {
|
||||
"description": "Array is the value of an array type parameter.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"map": {
|
||||
"description": "Map is the value of a map type parameter.",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"description": "Name is the name identifying a parameter.",
|
||||
"type": "string"
|
||||
},
|
||||
"string": {
|
||||
"description": "String_ is the value of a string type parameter.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -5808,6 +6218,13 @@
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Sources is a reference to the location of the application's manifests or chart",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
},
|
||||
"syncPolicy": {
|
||||
"$ref": "#/definitions/v1alpha1SyncPolicy"
|
||||
}
|
||||
@@ -5858,6 +6275,13 @@
|
||||
"type": "string",
|
||||
"title": "SourceType specifies the type of this application"
|
||||
},
|
||||
"sourceTypes": {
|
||||
"type": "array",
|
||||
"title": "SourceTypes specifies the type of the sources included in the application",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"summary": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSummary"
|
||||
},
|
||||
@@ -6155,6 +6579,13 @@
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Sources is a reference to the application's multiple sources used for comparison",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -6289,6 +6720,9 @@
|
||||
"$ref": "#/definitions/v1alpha1GitFileGeneratorItem"
|
||||
}
|
||||
},
|
||||
"pathParamPrefix": {
|
||||
"type": "string"
|
||||
},
|
||||
"repoURL": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -6544,6 +6978,23 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1ManagedNamespaceMetadata": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"annotations": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"labels": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1alpha1MatrixGenerator": {
|
||||
"description": "MatrixGenerator generates the cartesian product of two sets of parameters. The parameters are defined by two nested\ngenerators.",
|
||||
"type": "object",
|
||||
@@ -6897,6 +7348,14 @@
|
||||
"type": "boolean",
|
||||
"title": "EnableOCI specifies whether helm-oci support should be enabled for this repo"
|
||||
},
|
||||
"forceHttpBasicAuth": {
|
||||
"type": "boolean",
|
||||
"title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections"
|
||||
},
|
||||
"gcpServiceAccountKey": {
|
||||
"type": "string",
|
||||
"title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos"
|
||||
},
|
||||
"githubAppEnterpriseBaseUrl": {
|
||||
"type": "string",
|
||||
"title": "GithubAppEnterpriseBaseURL specifies the GitHub API URL for GitHub app authentication. If empty will default to https://api.github.com"
|
||||
@@ -6919,6 +7378,10 @@
|
||||
"type": "string",
|
||||
"title": "Password for authenticating at the repo server"
|
||||
},
|
||||
"proxy": {
|
||||
"type": "string",
|
||||
"title": "Proxy specifies the HTTP/HTTPS proxy used to access repos at the repo server"
|
||||
},
|
||||
"sshPrivateKey": {
|
||||
"type": "string",
|
||||
"title": "SSHPrivateKey contains the private key data for authenticating at the repo server using SSH (only Git repos)"
|
||||
@@ -6975,6 +7438,14 @@
|
||||
"type": "boolean",
|
||||
"title": "EnableOCI specifies whether helm-oci support should be enabled for this repo"
|
||||
},
|
||||
"forceHttpBasicAuth": {
|
||||
"type": "boolean",
|
||||
"title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections"
|
||||
},
|
||||
"gcpServiceAccountKey": {
|
||||
"type": "string",
|
||||
"title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos"
|
||||
},
|
||||
"githubAppEnterpriseBaseUrl": {
|
||||
"type": "string",
|
||||
"title": "GithubAppEnterpriseBaseURL specifies the base URL of GitHub Enterprise installation. If empty will default to https://api.github.com"
|
||||
@@ -7465,8 +7936,22 @@
|
||||
"type": "string",
|
||||
"title": "Revision holds the revision the sync was performed against"
|
||||
},
|
||||
"revisions": {
|
||||
"type": "array",
|
||||
"title": "Revisions holds the revision of each source in sources field the sync was performed against",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Sources is a reference to the application sources used for the sync operation",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -7767,9 +8252,23 @@
|
||||
"description": "Revision is the revision (Git) or chart version (Helm) which to sync the application to\nIf omitted, will use the revision specified in app spec.",
|
||||
"type": "string"
|
||||
},
|
||||
"revisions": {
|
||||
"description": "Revisions is the list of revision (Git) or chart version (Helm) which to sync each source in sources field for the application to\nIf omitted, will use the revision specified in app spec.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Sources overrides the source definition set in the application.\nThis is typically set in a Rollback operation and is nil during a Sync operation",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
},
|
||||
"syncOptions": {
|
||||
"type": "array",
|
||||
"title": "SyncOptions provide per-sync sync-options, e.g. Validate=false",
|
||||
@@ -7815,8 +8314,22 @@
|
||||
"type": "string",
|
||||
"title": "Revision holds the revision this sync operation was performed to"
|
||||
},
|
||||
"revisions": {
|
||||
"type": "array",
|
||||
"title": "Revisions holds the revision this sync operation was performed for respective indexed source in sources field",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
},
|
||||
"sources": {
|
||||
"type": "array",
|
||||
"title": "Source records the application source information of the sync, used for comparing auto-sync",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1alpha1ApplicationSource"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -7827,6 +8340,9 @@
|
||||
"automated": {
|
||||
"$ref": "#/definitions/v1alpha1SyncPolicyAutomated"
|
||||
},
|
||||
"managedNamespaceMetadata": {
|
||||
"$ref": "#/definitions/v1alpha1ManagedNamespaceMetadata"
|
||||
},
|
||||
"retry": {
|
||||
"$ref": "#/definitions/v1alpha1RetryStrategy"
|
||||
},
|
||||
@@ -7868,6 +8384,13 @@
|
||||
"type": "string",
|
||||
"title": "Revision contains information about the revision the comparison has been performed to"
|
||||
},
|
||||
"revisions": {
|
||||
"type": "array",
|
||||
"title": "Revisions contains information about the revisions of multiple sources the comparison has been performed to",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"title": "Status is the sync state of the comparison"
|
||||
|
||||
@@ -46,16 +46,17 @@ func getSubmoduleEnabled() bool {
|
||||
|
||||
func NewCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
metricsAddr string
|
||||
probeBindAddr string
|
||||
webhookAddr string
|
||||
enableLeaderElection bool
|
||||
namespace string
|
||||
argocdRepoServer string
|
||||
policy string
|
||||
debugLog bool
|
||||
dryRun bool
|
||||
clientConfig clientcmd.ClientConfig
|
||||
metricsAddr string
|
||||
probeBindAddr string
|
||||
webhookAddr string
|
||||
enableLeaderElection bool
|
||||
namespace string
|
||||
argocdRepoServer string
|
||||
policy string
|
||||
debugLog bool
|
||||
dryRun bool
|
||||
enableProgressiveSyncs bool
|
||||
)
|
||||
scheme := runtime.NewScheme()
|
||||
_ = clientgoscheme.AddToScheme(scheme)
|
||||
@@ -89,7 +90,7 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
policyObj, exists := utils.Policies[policy]
|
||||
if !exists {
|
||||
log.Info("Policy value can be: sync, create-only, create-update")
|
||||
log.Info("Policy value can be: sync, create-only, create-update, create-delete")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@@ -168,15 +169,16 @@ func NewCommand() *cobra.Command {
|
||||
|
||||
go func() { errors.CheckError(askPassServer.Run(askpass.SocketPath)) }()
|
||||
if err = (&controllers.ApplicationSetReconciler{
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
|
||||
Renderer: &utils.Render{},
|
||||
Policy: policyObj,
|
||||
ArgoAppClientset: appSetConfig,
|
||||
KubeClientset: k8sClient,
|
||||
ArgoDB: argoCDDB,
|
||||
Generators: topLevelGenerators,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("applicationset-controller"),
|
||||
Renderer: &utils.Render{},
|
||||
Policy: policyObj,
|
||||
ArgoAppClientset: appSetConfig,
|
||||
KubeClientset: k8sClient,
|
||||
ArgoDB: argoCDDB,
|
||||
EnableProgressiveSyncs: enableProgressiveSyncs,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "ApplicationSet")
|
||||
os.Exit(1)
|
||||
@@ -200,11 +202,12 @@ func NewCommand() *cobra.Command {
|
||||
"Enabling this will ensure there is only one active controller manager.")
|
||||
command.Flags().StringVar(&namespace, "namespace", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_NAMESPACE", ""), "Argo CD repo namespace (default: argocd)")
|
||||
command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_REPO_SERVER", common.DefaultRepoServerAddr), "Argo CD repo server address")
|
||||
command.Flags().StringVar(&policy, "policy", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_POLICY", "sync"), "Modify how application is synced between the generator and the cluster. Default is 'sync' (create & update & delete), options: 'create-only', 'create-update' (no deletion)")
|
||||
command.Flags().StringVar(&policy, "policy", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_POLICY", "sync"), "Modify how application is synced between the generator and the cluster. Default is 'sync' (create & update & delete), options: 'create-only', 'create-update' (no deletion), 'create-delete' (no update)")
|
||||
command.Flags().BoolVar(&debugLog, "debug", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DEBUG", false), "Print debug logs. Takes precedence over loglevel")
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGFORMAT", "text"), "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", env.StringFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_LOGLEVEL", "info"), "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().BoolVar(&dryRun, "dry-run", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_DRY_RUN", false), "Enable dry run mode")
|
||||
command.Flags().BoolVar(&enableProgressiveSyncs, "enable-progressive-syncs", env.ParseBoolFromEnv("ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS", false), "Enable use of the experimental progressive syncs feature.")
|
||||
return &command
|
||||
}
|
||||
|
||||
|
||||
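The new enable-progressive-syncs flag gates the experimental progressive syncs (RollingSync) behaviour and, as wired above, can also be switched on through the ARGOCD_APPLICATIONSET_CONTROLLER_ENABLE_PROGRESSIVE_SYNCS environment variable.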
@@ -156,7 +156,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
command.Flags().StringVar(&logFormat, "logformat", "text", "Set the logging format. One of: text|json")
|
||||
command.Flags().IntVar(&metricsPort, "metrics-port", defaultMetricsPort, "Metrics port")
|
||||
command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", "argocd-repo-server:8081", "Argo CD repo server address")
|
||||
command.Flags().StringVar(&argocdRepoServer, "argocd-repo-server", common.DefaultRepoServerAddr, "Argo CD repo server address")
|
||||
command.Flags().BoolVar(&argocdRepoServerPlaintext, "argocd-repo-server-plaintext", false, "Use a plaintext client (non-TLS) to connect to repository server")
|
||||
command.Flags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server")
|
||||
command.Flags().StringVar(&configMapName, "config-map-name", "argocd-notifications-cm", "Set notifications ConfigMap name")
|
||||
|
||||
@@ -71,6 +71,7 @@ func NewCommand() *cobra.Command {
|
||||
dexServerStrictTLS bool
|
||||
staticAssetsDir string
|
||||
applicationNamespaces []string
|
||||
enableProxyExtension bool
|
||||
)
|
||||
var command = &cobra.Command{
|
||||
Use: cliName,
|
||||
@@ -184,6 +185,7 @@ func NewCommand() *cobra.Command {
|
||||
RedisClient: redisClient,
|
||||
StaticAssetsDir: staticAssetsDir,
|
||||
ApplicationNamespaces: applicationNamespaces,
|
||||
EnableProxyExtension: enableProxyExtension,
|
||||
}
|
||||
|
||||
stats.RegisterStackDumper()
|
||||
@@ -235,6 +237,7 @@ func NewCommand() *cobra.Command {
|
||||
command.Flags().BoolVar(&dexServerPlaintext, "dex-server-plaintext", env.ParseBoolFromEnv("ARGOCD_SERVER_DEX_SERVER_PLAINTEXT", false), "Use a plaintext client (non-TLS) to connect to dex server")
|
||||
command.Flags().BoolVar(&dexServerStrictTLS, "dex-server-strict-tls", env.ParseBoolFromEnv("ARGOCD_SERVER_DEX_SERVER_STRICT_TLS", false), "Perform strict validation of TLS certificates when connecting to dex server")
|
||||
command.Flags().StringSliceVar(&applicationNamespaces, "application-namespaces", env.StringsFromEnv("ARGOCD_APPLICATION_NAMESPACES", []string{}, ","), "List of additional namespaces where application resources can be managed in")
|
||||
command.Flags().BoolVar(&enableProxyExtension, "enable-proxy-extension", env.ParseBoolFromEnv("ARGOCD_SERVER_ENABLE_PROXY_EXTENSION", false), "Enable Proxy Extension feature")
|
||||
tlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)
|
||||
cacheSrc = servercache.AddCacheFlagsToCmd(command, func(client *redis.Client) {
|
||||
redisClient = client
|
||||
|
||||
@@ -56,6 +56,7 @@ func NewAdminCommand() *cobra.Command {
|
||||
command.AddCommand(NewExportCommand())
|
||||
command.AddCommand(NewDashboardCommand())
|
||||
command.AddCommand(NewNotificationsCommand())
|
||||
command.AddCommand(NewInitialPasswordCommand())
|
||||
|
||||
command.Flags().StringVar(&cmdutil.LogFormat, "logformat", "text", "Set the logging format. One of: text|json")
|
||||
command.Flags().StringVar(&cmdutil.LogLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
|
||||
|
||||
@@ -401,7 +401,12 @@ func reconcileApplications(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := appStateManager.CompareAppState(&app, proj, app.Spec.Source.TargetRevision, app.Spec.Source, false, false, nil)
|
||||
sources := make([]v1alpha1.ApplicationSource, 0)
|
||||
revisions := make([]string, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions = append(revisions, app.Spec.GetSource().TargetRevision)
|
||||
|
||||
res := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false)
|
||||
items = append(items, appReconcileResult{
|
||||
Name: app.Name,
|
||||
Conditions: app.Status.Conditions,
|
||||
|
||||
@@ -80,6 +80,7 @@ func TestGetReconcileResults_Refresh(t *testing.T) {
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
Project: "default",
|
||||
Destination: v1alpha1.ApplicationDestination{
|
||||
Server: v1alpha1.KubernetesInternalAPIServerAddr,
|
||||
|
||||
@@ -265,9 +265,7 @@ func runClusterNamespacesCommand(ctx context.Context, clientConfig clientcmd.Cli
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if app.Spec.Destination.Server == cluster.Server {
|
||||
nsSet[app.Spec.Destination.Namespace] = true
|
||||
}
|
||||
nsSet[app.Spec.Destination.Namespace] = true
|
||||
}
|
||||
}
|
||||
var namespaces []string
|
||||
|
||||
cmd/argocd/commands/admin/initial_password.go (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
package admin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/util/cli"
|
||||
"github.com/argoproj/argo-cd/v2/util/errors"
|
||||
)
|
||||
|
||||
const initialPasswordSecretName = "argocd-initial-admin-secret"
|
||||
|
||||
// NewInitialPasswordCommand defines a new command to retrieve Argo CD initial password.
|
||||
func NewInitialPasswordCommand() *cobra.Command {
|
||||
var (
|
||||
clientConfig clientcmd.ClientConfig
|
||||
)
|
||||
var command = cobra.Command{
|
||||
Use: "initial-password",
|
||||
Short: "Prints initial password to log in to Argo CD for the first time",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
|
||||
config, err := clientConfig.ClientConfig()
|
||||
errors.CheckError(err)
|
||||
namespace, _, err := clientConfig.Namespace()
|
||||
errors.CheckError(err)
|
||||
|
||||
kubeClientset := kubernetes.NewForConfigOrDie(config)
|
||||
secret, err := kubeClientset.CoreV1().Secrets(namespace).Get(context.Background(), initialPasswordSecretName, v1.GetOptions{})
|
||||
errors.CheckError(err)
|
||||
|
||||
if initialPass, ok := secret.Data["password"]; ok {
|
||||
fmt.Println(string(initialPass))
|
||||
fmt.Println("\n This password must be only used for first time login. We strongly recommend you update the password using `argocd account update-password`.")
|
||||
}
|
||||
},
|
||||
}
|
||||
clientConfig = cli.AddKubectlFlagsToCmd(&command)
|
||||
|
||||
return &command
|
||||
}
|
||||
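As the implementation above shows, argocd admin initial-password simply reads the password key of the argocd-initial-admin-secret Secret in the configured namespace and prints it; the same value can also be read directly from that Secret with kubectl if the CLI is unavailable.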
@@ -33,7 +33,7 @@ func NewNotificationsCommand() *cobra.Command {
|
||||
var argocdService service.Service
|
||||
toolsCommand := cmd.NewToolsCommand(
|
||||
"notifications",
|
||||
"notifications",
|
||||
"argocd admin notifications",
|
||||
applications,
|
||||
settings.GetFactorySettings(argocdService, "argocd-notifications-secret", "argocd-notifications-cm"), func(clientConfig clientcmd.ClientConfig) {
|
||||
k8sCfg, err := clientConfig.ClientConfig()
|
||||
@@ -64,7 +64,7 @@ func NewNotificationsCommand() *cobra.Command {
|
||||
log.Fatalf("Failed to initialize Argo CD service: %v", err)
|
||||
}
|
||||
})
|
||||
toolsCommand.PersistentFlags().StringVar(&argocdRepoServer, "argocd-repo-server", "argocd-repo-server:8081", "Argo CD repo server address")
|
||||
toolsCommand.PersistentFlags().StringVar(&argocdRepoServer, "argocd-repo-server", common.DefaultRepoServerAddr, "Argo CD repo server address")
|
||||
toolsCommand.PersistentFlags().BoolVar(&argocdRepoServerPlaintext, "argocd-repo-server-plaintext", false, "Use a plaintext client (non-TLS) to connect to repository server")
|
||||
toolsCommand.PersistentFlags().BoolVar(&argocdRepoServerStrictTLS, "argocd-repo-server-strict-tls", false, "Perform strict validation of TLS certificates when connecting to repo server")
|
||||
return toolsCommand
|
||||
|
||||
@@ -206,7 +206,7 @@ var validatorsByGroup = map[string]settingValidator{
|
||||
}
|
||||
ssoProvider = "Dex"
|
||||
} else if general.OIDCConfigRAW != "" {
|
||||
if _, err := settings.UnmarshalOIDCConfig(general.OIDCConfigRAW); err != nil {
|
||||
if err := settings.ValidateOIDCConfig(general.OIDCConfigRAW); err != nil {
|
||||
return "", fmt.Errorf("invalid oidc.config: %v", err)
|
||||
}
|
||||
ssoProvider = "OIDC"
|
||||
|
||||
@@ -150,7 +150,7 @@ func NewApplicationCreateCommand(clientOpts *argocdclient.ClientOptions) *cobra.
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
}
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
if appNamespace != "" {
|
||||
@@ -294,7 +294,7 @@ func NewApplicationGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
@@ -440,15 +440,16 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
|
||||
func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *argoappv1.SyncWindows) {
|
||||
source := app.Spec.GetSource()
|
||||
fmt.Printf(printOpFmtStr, "Name:", app.QualifiedName())
|
||||
fmt.Printf(printOpFmtStr, "Project:", app.Spec.GetProject())
|
||||
fmt.Printf(printOpFmtStr, "Server:", getServer(app))
|
||||
fmt.Printf(printOpFmtStr, "Namespace:", app.Spec.Destination.Namespace)
|
||||
fmt.Printf(printOpFmtStr, "URL:", appURL)
|
||||
fmt.Printf(printOpFmtStr, "Repo:", app.Spec.Source.RepoURL)
|
||||
fmt.Printf(printOpFmtStr, "Target:", app.Spec.Source.TargetRevision)
|
||||
fmt.Printf(printOpFmtStr, "Path:", app.Spec.Source.Path)
|
||||
printAppSourceDetails(&app.Spec.Source)
|
||||
fmt.Printf(printOpFmtStr, "Repo:", source.RepoURL)
|
||||
fmt.Printf(printOpFmtStr, "Target:", source.TargetRevision)
|
||||
fmt.Printf(printOpFmtStr, "Path:", source.Path)
|
||||
printAppSourceDetails(&source)
|
||||
var wds []string
|
||||
var status string
|
||||
var allow, deny, inactiveAllows bool
|
||||
@@ -502,11 +503,11 @@ func printAppSummaryTable(app *argoappv1.Application, appURL string, windows *ar
|
||||
syncStatusStr := string(app.Status.Sync.Status)
|
||||
switch app.Status.Sync.Status {
|
||||
case argoappv1.SyncStatusCodeSynced:
|
||||
syncStatusStr += fmt.Sprintf(" to %s", app.Spec.Source.TargetRevision)
|
||||
syncStatusStr += fmt.Sprintf(" to %s", app.Spec.GetSource().TargetRevision)
|
||||
case argoappv1.SyncStatusCodeOutOfSync:
|
||||
syncStatusStr += fmt.Sprintf(" from %s", app.Spec.Source.TargetRevision)
|
||||
syncStatusStr += fmt.Sprintf(" from %s", app.Spec.GetSource().TargetRevision)
|
||||
}
|
||||
if !git.IsCommitSHA(app.Spec.Source.TargetRevision) && !git.IsTruncatedCommitSHA(app.Spec.Source.TargetRevision) && len(app.Status.Sync.Revision) > 7 {
|
||||
if !git.IsCommitSHA(app.Spec.GetSource().TargetRevision) && !git.IsTruncatedCommitSHA(app.Spec.GetSource().TargetRevision) && len(app.Status.Sync.Revision) > 7 {
|
||||
syncStatusStr += fmt.Sprintf(" (%s)", app.Status.Sync.Revision[0:7])
|
||||
}
|
||||
fmt.Printf(printOpFmtStr, "Sync Status:", syncStatusStr)
|
||||
@@ -575,8 +576,8 @@ func truncateString(str string, num int) string {
|
||||
|
||||
// printParams prints parameters and overrides
|
||||
func printParams(app *argoappv1.Application) {
|
||||
if app.Spec.Source.Helm != nil {
|
||||
printHelmParams(app.Spec.Source.Helm)
|
||||
if app.Spec.GetSource().Helm != nil {
|
||||
printHelmParams(app.Spec.GetSource().Helm)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -624,7 +625,7 @@ func NewApplicationSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Com
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName, AppNamespace: &appNs})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
@@ -692,11 +693,12 @@ func NewApplicationUnsetCommand(clientOpts *argocdclient.ClientOptions) *cobra.C
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{Name: &appName, AppNamespace: &appNs})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
updated, nothingToUnset := unset(&app.Spec.Source, opts)
|
||||
source := app.Spec.GetSource()
|
||||
updated, nothingToUnset := unset(&source, opts)
|
||||
if nothingToUnset {
|
||||
c.HelpFunc()(c, args)
|
||||
os.Exit(1)
|
||||
@@ -842,18 +844,19 @@ func getLocalObjects(ctx context.Context, app *argoappv1.Application, local, loc
|
||||
|
||||
func getLocalObjectsString(ctx context.Context, app *argoappv1.Application, local, localRepoRoot, appLabelKey, kubeVersion string, apiVersions []string, kustomizeOptions *argoappv1.KustomizeOptions,
|
||||
configManagementPlugins []*argoappv1.ConfigManagementPlugin, trackingMethod string) []string {
|
||||
res, err := repository.GenerateManifests(ctx, local, localRepoRoot, app.Spec.Source.TargetRevision, &repoapiclient.ManifestRequest{
|
||||
Repo: &argoappv1.Repository{Repo: app.Spec.Source.RepoURL},
|
||||
source := app.Spec.GetSource()
|
||||
res, err := repository.GenerateManifests(ctx, local, localRepoRoot, source.TargetRevision, &repoapiclient.ManifestRequest{
|
||||
Repo: &argoappv1.Repository{Repo: source.RepoURL},
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.Name,
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &app.Spec.Source,
|
||||
ApplicationSource: &source,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: kubeVersion,
|
||||
ApiVersions: apiVersions,
|
||||
Plugins: configManagementPlugins,
|
||||
TrackingMethod: trackingMethod,
|
||||
}, true, &git.NoopCredsStore{}, resource.MustParse("0"))
|
||||
}, true, &git.NoopCredsStore{}, resource.MustParse("0"), nil)
|
||||
errors.CheckError(err)
|
||||
|
||||
return res.Manifests
|
||||
@@ -930,7 +933,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
@@ -964,7 +967,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
|
||||
diffOption.serversideRes = res
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.6.")
|
||||
fmt.Fprintf(os.Stderr, "Warning: local diff without --server-side-generate is deprecated and does not work with plugins. Server-side generation will be the default in v2.7.")
|
||||
conn, clusterIf := clientset.NewClusterClientOrDie()
|
||||
defer argoio.Close(conn)
|
||||
cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
|
||||
@@ -1247,7 +1250,7 @@ func printApplicationTable(apps []argoappv1.Application, output *string) {
|
||||
formatConditionsSummary(app),
|
||||
}
|
||||
if *output == "wide" {
|
||||
vals = append(vals, app.Spec.Source.RepoURL, app.Spec.Source.Path, app.Spec.Source.TargetRevision)
|
||||
vals = append(vals, app.Spec.GetSource().RepoURL, app.Spec.GetSource().Path, app.Spec.GetSource().TargetRevision)
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, fmtStr, vals...)
|
||||
}
|
||||
@@ -1300,7 +1303,7 @@ func NewApplicationListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
var appsWithDeprecatedPlugins []string
|
||||
for _, app := range appList {
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
appsWithDeprecatedPlugins = append(appsWithDeprecatedPlugins, app.Name)
|
||||
}
|
||||
}
|
||||
@@ -1373,6 +1376,7 @@ const (
|
||||
resourceFieldCount = 3
|
||||
resourceFieldNamespaceDelimiter = "/"
|
||||
resourceFieldNameWithNamespaceCount = 2
|
||||
resourceExcludeIndicator = "!"
|
||||
)
|
||||
|
||||
// resource is GROUP:KIND:NAMESPACE/NAME or GROUP:KIND:NAME
|
||||
@@ -1397,6 +1401,12 @@ func parseSelectedResources(resources []string) ([]*argoappv1.SyncOperationResou
|
||||
}
|
||||
|
||||
for _, resource := range resources {
|
||||
isExcluded := false
|
||||
// check if the resource flag starts with a '!'
|
||||
if strings.HasPrefix(resource, resourceExcludeIndicator) {
|
||||
resource = strings.TrimPrefix(resource, resourceExcludeIndicator)
|
||||
isExcluded = true
|
||||
}
|
||||
fields := strings.Split(resource, resourceFieldDelimiter)
|
||||
if len(fields) != resourceFieldCount {
|
||||
return nil, fmt.Errorf("Resource should have GROUP%sKIND%sNAME, but instead got: %s", resourceFieldDelimiter, resourceFieldDelimiter, resource)
|
||||
@@ -1410,6 +1420,7 @@ func parseSelectedResources(resources []string) ([]*argoappv1.SyncOperationResou
|
||||
Kind: fields[1],
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Exclude: isExcluded,
|
||||
})
|
||||
}
|
||||
return selectedResources, nil
|
||||
@@ -1444,6 +1455,16 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
# Wait for multiple apps
|
||||
argocd app wait my-app other-app
|
||||
|
||||
# Wait for apps by resource
|
||||
# Resource should be formatted as GROUP:KIND:NAME. If no GROUP is specified then :KIND:NAME.
|
||||
argocd app wait my-app --resource :Service:my-service
|
||||
argocd app wait my-app --resource argoproj.io:Rollout:my-rollout
|
||||
argocd app wait my-app --resource '!apps:Deployment:my-service'
|
||||
argocd app wait my-app --resource apps:Deployment:my-service --resource :Service:my-service
|
||||
argocd app wait my-app --resource '!*:Service:*'
|
||||
# Specify namespace if the application has resources with the same name in different namespaces
|
||||
argocd app wait my-app --resource argoproj.io:Rollout:my-namespace/my-rollout
|
||||
|
||||
# Wait for apps by label, in this example we are waiting for apps that are children of another app (aka app-of-apps)
|
||||
argocd app wait -l app.kubernetes.io/instance=my-app
|
||||
argocd app wait -l app.kubernetes.io/instance!=my-app
|
||||
@@ -1482,7 +1503,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&watch.suspended, "suspended", false, "Wait for suspended")
|
||||
command.Flags().BoolVar(&watch.degraded, "degraded", false, "Wait for degraded")
|
||||
command.Flags().StringVarP(&selector, "selector", "l", "", "Wait for apps by label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.")
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%sKIND%sNAME. Fields may be blank. This option may be specified repeatedly", resourceFieldDelimiter, resourceFieldDelimiter))
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator))
|
||||
command.Flags().BoolVar(&watch.operation, "operation", false, "Wait for pending operations")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
return command
|
||||
@@ -1542,6 +1563,9 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
# Resource should be formatted as GROUP:KIND:NAME. If no GROUP is specified then :KIND:NAME
|
||||
argocd app sync my-app --resource :Service:my-service
|
||||
argocd app sync my-app --resource argoproj.io:Rollout:my-rollout
|
||||
argocd app sync my-app --resource '!apps:Deployment:my-service'
|
||||
argocd app sync my-app --resource apps:Deployment:my-service --resource :Service:my-service
|
||||
argocd app sync my-app --resource '!*:Service:*'
|
||||
# Specify namespace if the application has resources with the same name in different namespaces
|
||||
argocd app sync my-app --resource argoproj.io:Rollout:my-namespace/my-rollout`,
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
@@ -1625,14 +1649,28 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
|
||||
var localObjsStrings []string
|
||||
diffOption := &DifferenceOption{}
|
||||
if local != "" {
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.HasMultipleSources() {
|
||||
log.Fatal("argocd cli does not work on multi-source app")
|
||||
return
|
||||
}
|
||||
|
||||
// filters out only those resources that needs to be synced
|
||||
filteredResources := filterAppResources(app, selectedResources)
|
||||
|
||||
// if resources are provided and no app resources match, then return error
|
||||
if len(resources) > 0 && len(filteredResources) == 0 {
|
||||
log.Fatalf("No matching app resources found for resource filter: %v", strings.Join(resources, ", "))
|
||||
}
|
||||
|
||||
if local != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
@@ -1681,7 +1719,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
AppNamespace: &appNs,
|
||||
DryRun: &dryRun,
|
||||
Revision: &revision,
|
||||
Resources: selectedResources,
|
||||
Resources: filteredResources,
|
||||
Prune: &prune,
|
||||
Manifests: localObjsStrings,
|
||||
Infos: getInfos(infos),
|
||||
@@ -1709,13 +1747,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
}
|
||||
}
|
||||
if diffChanges {
|
||||
app, err := appIf.Get(ctx, &applicationpkg.ApplicationQuery{
|
||||
Name: &appName,
|
||||
AppNamespace: &appNs,
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
@@ -1767,7 +1799,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
command.Flags().BoolVar(&dryRun, "dry-run", false, "Preview apply without affecting cluster")
|
||||
command.Flags().BoolVar(&prune, "prune", false, "Allow deleting unexpected resources")
|
||||
command.Flags().StringVar(&revision, "revision", "", "Sync to a specific revision. Preserves parameter overrides")
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%sKIND%sNAME. Fields may be blank. This option may be specified repeatedly", resourceFieldDelimiter, resourceFieldDelimiter))
|
||||
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator))
|
||||
command.Flags().StringVarP(&selector, "selector", "l", "", "Sync apps that match this label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.")
|
||||
command.Flags().StringArrayVar(&labels, "label", []string{}, "Sync only specific resources with a label. This option may be specified repeatedly.")
|
||||
command.Flags().UintVar(&timeout, "timeout", defaultCheckTimeoutSeconds, "Time out after this many seconds")
|
||||
@@ -1892,15 +1924,9 @@ func getResourceStates(app *argoappv1.Application, selectedResources []*argoappv
|
||||
}
|
||||
// filter out not selected resources
|
||||
if len(selectedResources) > 0 {
|
||||
r := []argoappv1.SyncOperationResource{}
|
||||
for _, res := range selectedResources {
|
||||
if res != nil {
|
||||
r = append(r, *res)
|
||||
}
|
||||
}
|
||||
for i := len(states) - 1; i >= 0; i-- {
|
||||
res := states[i]
|
||||
if !argo.ContainsSyncResource(res.Name, res.Namespace, schema.GroupVersionKind{Group: res.Group, Kind: res.Kind}, r) {
|
||||
if !argo.IncludeResource(res.Name, res.Namespace, schema.GroupVersionKind{Group: res.Group, Kind: res.Kind}, selectedResources) {
|
||||
states = append(states[:i], states[i+1:]...)
|
||||
}
|
||||
}
|
||||
@@ -1908,6 +1934,26 @@ func getResourceStates(app *argoappv1.Application, selectedResources []*argoappv
|
||||
return states
|
||||
}
|
||||
|
||||
// filterAppResources selects the app resources that match at least one of the resource filters.
|
||||
func filterAppResources(app *argoappv1.Application, selectedResources []*argoappv1.SyncOperationResource) []*argoappv1.SyncOperationResource {
|
||||
var filteredResources []*argoappv1.SyncOperationResource
|
||||
if app != nil && len(selectedResources) > 0 {
|
||||
for i := range app.Status.Resources {
|
||||
appResource := app.Status.Resources[i]
|
||||
if (argo.IncludeResource(appResource.Name, appResource.Namespace,
|
||||
schema.GroupVersionKind{Group: appResource.Group, Kind: appResource.Kind}, selectedResources)) {
|
||||
filteredResources = append(filteredResources, &argoappv1.SyncOperationResource{
|
||||
Group: appResource.Group,
|
||||
Kind: appResource.Kind,
|
||||
Name: appResource.Name,
|
||||
Namespace: appResource.Namespace,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
return filteredResources
|
||||
}
|
||||
|
||||
func groupResourceStates(app *argoappv1.Application, selectedResources []*argoappv1.SyncOperationResource) map[string]*resourceState {
|
||||
resStates := make(map[string]*resourceState)
|
||||
for _, result := range getResourceStates(app, selectedResources) {
|
||||
@@ -2087,8 +2133,9 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
if len(parameters) == 0 {
|
||||
return
|
||||
}
|
||||
source := app.Spec.GetSource()
|
||||
var sourceType argoappv1.ApplicationSourceType
|
||||
if st, _ := app.Spec.Source.ExplicitType(); st != nil {
|
||||
if st, _ := source.ExplicitType(); st != nil {
|
||||
sourceType = *st
|
||||
} else if app.Status.SourceType != "" {
|
||||
sourceType = app.Status.SourceType
|
||||
@@ -2100,8 +2147,8 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
|
||||
switch sourceType {
|
||||
case argoappv1.ApplicationSourceTypeHelm:
|
||||
if app.Spec.Source.Helm == nil {
|
||||
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{}
|
||||
if source.Helm == nil {
|
||||
source.Helm = &argoappv1.ApplicationSourceHelm{}
|
||||
}
|
||||
for _, p := range parameters {
|
||||
newParam, err := argoappv1.NewHelmParameter(p, false)
|
||||
@@ -2109,7 +2156,7 @@ func setParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
app.Spec.Source.Helm.AddParameter(*newParam)
|
||||
source.Helm.AddParameter(*newParam)
|
||||
}
|
||||
default:
|
||||
log.Fatalf("Parameters can only be set against Helm applications")
|
||||
@@ -2161,7 +2208,7 @@ func NewApplicationHistoryCommand(clientOpts *argocdclient.ClientOptions) *cobra
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
@@ -2225,7 +2272,7 @@ func NewApplicationRollbackCommand(clientOpts *argocdclient.ClientOptions) *cobr
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
@@ -2312,7 +2359,7 @@ func NewApplicationManifestsCommand(clientOpts *argocdclient.ClientOptions) *cob
|
||||
app, err := appIf.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
@@ -2415,7 +2462,7 @@ func NewApplicationEditCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
|
||||
})
|
||||
errors.CheckError(err)
|
||||
|
||||
if app.Spec.Source.Plugin != nil && app.Spec.Source.Plugin.Name != "" {
|
||||
if app.Spec.GetSource().Plugin != nil && app.Spec.GetSource().Plugin.Name != "" {
|
||||
log.Warnf(argocommon.ConfigMapPluginCLIDeprecationWarning)
|
||||
}
|
||||
|
||||
|
||||
@@ -498,7 +498,7 @@ func TestPrintAppSummaryTable(t *testing.T) {
|
||||
},
|
||||
Project: "default",
|
||||
Destination: v1alpha1.ApplicationDestination{Server: "local", Namespace: "argocd"},
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
RepoURL: "test",
|
||||
TargetRevision: "master",
|
||||
Path: "/test",
|
||||
@@ -604,7 +604,7 @@ func TestPrintParams(t *testing.T) {
|
||||
output, _ := captureOutput(func() error {
|
||||
app := &v1alpha1.Application{
|
||||
Spec: v1alpha1.ApplicationSpec{
|
||||
Source: v1alpha1.ApplicationSource{
|
||||
Source: &v1alpha1.ApplicationSource{
|
||||
Helm: &v1alpha1.ApplicationSourceHelm{
|
||||
Parameters: []v1alpha1.HelmParameter{
|
||||
{
|
||||
@@ -904,11 +904,220 @@ func Test_unset_nothingToUnset(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterAppResources(t *testing.T) {
|
||||
// App resources
|
||||
var (
|
    appReplicaSet1 = v1alpha1.ResourceStatus{Group: "apps", Kind: "ReplicaSet", Namespace: "default", Name: "replicaSet-name1"}
    appReplicaSet2 = v1alpha1.ResourceStatus{Group: "apps", Kind: "ReplicaSet", Namespace: "default", Name: "replicaSet-name2"}
    appJob         = v1alpha1.ResourceStatus{Group: "batch", Kind: "Job", Namespace: "default", Name: "job-name"}
    appService1    = v1alpha1.ResourceStatus{Group: "", Kind: "Service", Namespace: "default", Name: "service-name1"}
    appService2    = v1alpha1.ResourceStatus{Group: "", Kind: "Service", Namespace: "default", Name: "service-name2"}
    appDeployment  = v1alpha1.ResourceStatus{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deployment-name"}
)
app := v1alpha1.Application{
    Status: v1alpha1.ApplicationStatus{
        Resources: []v1alpha1.ResourceStatus{appReplicaSet1, appReplicaSet2, appJob, appService1, appService2, appDeployment},
    },
}

// Resource filters
var (
    blankValues = argoappv1.SyncOperationResource{Group: "", Kind: "", Name: "", Namespace: "", Exclude: false}
    // *:*:*
    includeAllResources = argoappv1.SyncOperationResource{Group: "*", Kind: "*", Name: "*", Namespace: "", Exclude: false}
    // !*:*:*
    excludeAllResources = argoappv1.SyncOperationResource{Group: "*", Kind: "*", Name: "*", Namespace: "", Exclude: true}
    // *:Service:*
    includeAllServiceResources = argoappv1.SyncOperationResource{Group: "*", Kind: "Service", Name: "*", Namespace: "", Exclude: false}
    // !*:Service:*
    excludeAllServiceResources = argoappv1.SyncOperationResource{Group: "*", Kind: "Service", Name: "*", Namespace: "", Exclude: true}
    // apps:ReplicaSet:replicaSet-name1
    includeReplicaSet1Resource = argoappv1.SyncOperationResource{Group: "apps", Kind: "ReplicaSet", Name: "replicaSet-name1", Namespace: "", Exclude: false}
    // !apps:ReplicaSet:replicaSet-name2
    excludeReplicaSet2Resource = argoappv1.SyncOperationResource{Group: "apps", Kind: "ReplicaSet", Name: "replicaSet-name2", Namespace: "", Exclude: true}
)

// Filtered resources
var (
    replicaSet1 = v1alpha1.SyncOperationResource{Group: "apps", Kind: "ReplicaSet", Namespace: "default", Name: "replicaSet-name1"}
    replicaSet2 = v1alpha1.SyncOperationResource{Group: "apps", Kind: "ReplicaSet", Namespace: "default", Name: "replicaSet-name2"}
    job         = v1alpha1.SyncOperationResource{Group: "batch", Kind: "Job", Namespace: "default", Name: "job-name"}
    service1    = v1alpha1.SyncOperationResource{Group: "", Kind: "Service", Namespace: "default", Name: "service-name1"}
    service2    = v1alpha1.SyncOperationResource{Group: "", Kind: "Service", Namespace: "default", Name: "service-name2"}
    deployment  = v1alpha1.SyncOperationResource{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "deployment-name"}
)
tests := []struct {
    testName          string
    selectedResources []*argoappv1.SyncOperationResource
    expectedResult    []*argoappv1.SyncOperationResource
}{
    // --resource apps:ReplicaSet:replicaSet-name1 --resource *:Service:*
    {testName: "Include ReplicaSet replicaSet-name1 resource and all service resources",
        selectedResources: []*argoappv1.SyncOperationResource{&includeAllServiceResources, &includeReplicaSet1Resource},
        expectedResult:    []*argoappv1.SyncOperationResource{&replicaSet1, &service1, &service2},
    },
    // --resource apps:ReplicaSet:replicaSet-name1 --resource !*:Service:*
    {testName: "Include ReplicaSet replicaSet-name1 resource and exclude all service resources",
        selectedResources: []*argoappv1.SyncOperationResource{&excludeAllServiceResources, &includeReplicaSet1Resource},
        expectedResult:    []*argoappv1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &deployment},
    },
    // --resource !apps:ReplicaSet:replicaSet-name2 --resource !*:Service:*
    {testName: "Exclude ReplicaSet replicaSet-name2 resource and all service resources",
        selectedResources: []*argoappv1.SyncOperationResource{&excludeReplicaSet2Resource, &excludeAllServiceResources},
        expectedResult:    []*argoappv1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &service1, &service2, &deployment},
    },
    // --resource !apps:ReplicaSet:replicaSet-name2
    {testName: "Exclude ReplicaSet replicaSet-name2 resource",
        selectedResources: []*argoappv1.SyncOperationResource{&excludeReplicaSet2Resource},
        expectedResult:    []*argoappv1.SyncOperationResource{&replicaSet1, &job, &service1, &service2, &deployment},
    },
    // --resource apps:ReplicaSet:replicaSet-name1
    {testName: "Include ReplicaSet replicaSet-name1 resource",
        selectedResources: []*argoappv1.SyncOperationResource{&includeReplicaSet1Resource},
        expectedResult:    []*argoappv1.SyncOperationResource{&replicaSet1},
    },
    // --resource !*:Service:*
    {testName: "Exclude Service resources",
        selectedResources: []*argoappv1.SyncOperationResource{&excludeAllServiceResources},
        expectedResult:    []*argoappv1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &deployment},
    },
    // --resource *:Service:*
    {testName: "Include Service resources",
        selectedResources: []*argoappv1.SyncOperationResource{&includeAllServiceResources},
        expectedResult:    []*argoappv1.SyncOperationResource{&service1, &service2},
    },
    // --resource !*:*:*
    {testName: "Exclude all resources",
        selectedResources: []*argoappv1.SyncOperationResource{&excludeAllResources},
        expectedResult:    nil,
    },
    // --resource *:*:*
    {testName: "Include all resources",
        selectedResources: []*argoappv1.SyncOperationResource{&includeAllResources},
        expectedResult:    []*argoappv1.SyncOperationResource{&replicaSet1, &replicaSet2, &job, &service1, &service2, &deployment},
    },
    {testName: "No Filters",
        selectedResources: []*argoappv1.SyncOperationResource{&blankValues},
        expectedResult:    nil,
    },
    {testName: "Empty Filter",
        selectedResources: []*argoappv1.SyncOperationResource{},
        expectedResult:    nil,
    },
}

for _, test := range tests {
    t.Run(test.testName, func(t *testing.T) {
        filteredResources := filterAppResources(&app, test.selectedResources)
        assert.Equal(t, test.expectedResult, filteredResources)
    })
}
}
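The comments in the table above use the selector syntax accepted by the --resource flag: GROUP:KIND:NAME (or GROUP:KIND:NAMESPACE/NAME), with * as a wildcard and a leading ! marking an exclusion. As a rough, hypothetical sketch that is not part of this change, the same unexported helpers exercised by the test could be driven directly; the selector values below are illustrative only:

    // Sketch only: parse CLI-style selectors, then match them against the app's resources.
    selected, err := parseSelectedResources([]string{
        "apps:ReplicaSet:replicaSet-name1", // include a single ReplicaSet
        "!*:Service:*",                     // exclude every Service
    })
    assert.NoError(t, err)
    filtered := filterAppResources(&app, selected)
    assert.NotNil(t, filtered)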
func TestParseSelectedResources(t *testing.T) {
    resources := []string{"v1alpha:Application:test", "v1alpha:Application:namespace/test"}
    resources := []string{"v1alpha:Application:test",
        "v1alpha:Application:namespace/test",
        "!v1alpha:Application:test",
        "apps:Deployment:default/test",
        "!*:*:*"}
    operationResources, err := parseSelectedResources(resources)
    assert.NoError(t, err)
    assert.Len(t, operationResources, 2)
    assert.Len(t, operationResources, 5)
    assert.Equal(t, *operationResources[0], v1alpha1.SyncOperationResource{
        Namespace: "",
        Name:      "test",
@@ -921,6 +1130,27 @@ func TestParseSelectedResources(t *testing.T) {
        Kind:  "Application",
        Group: "v1alpha",
    })
    assert.Equal(t, *operationResources[2], v1alpha1.SyncOperationResource{
        Namespace: "",
        Name:      "test",
        Kind:      "Application",
        Group:     "v1alpha",
        Exclude:   true,
    })
    assert.Equal(t, *operationResources[3], v1alpha1.SyncOperationResource{
        Namespace: "default",
        Name:      "test",
        Kind:      "Deployment",
        Group:     "apps",
        Exclude:   false,
    })
    assert.Equal(t, *operationResources[4], v1alpha1.SyncOperationResource{
        Namespace: "",
        Name:      "*",
        Kind:      "*",
        Group:     "*",
        Exclude:   true,
    })
}
func TestParseSelectedResourcesIncorrect(t *testing.T) {
@@ -985,7 +1215,7 @@ func TestPrintApplicationTableWide(t *testing.T) {
            Server:    "http://localhost:8080",
            Namespace: "default",
        },
        Source: v1alpha1.ApplicationSource{
        Source: &v1alpha1.ApplicationSource{
            RepoURL:        "https://github.com/argoproj/argocd-example-apps",
            Path:           "guestbook",
            TargetRevision: "123",
@@ -1261,7 +1491,7 @@ func testApp(name, project string, labels map[string]string, annotations map[str
            Finalizers: finalizers,
        },
        Spec: argoappv1.ApplicationSpec{
            Source: argoappv1.ApplicationSource{
            Source: &argoappv1.ApplicationSource{
                RepoURL: "https://github.com/argoproj/argocd-example-apps.git",
            },
            Project: project,
@@ -95,7 +95,7 @@ func NewApplicationSetGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.
            fmt.Println()
        }
        if showParams {
            printHelmParams(appSet.Spec.Template.Spec.Source.Helm)
            printHelmParams(appSet.Spec.Template.Spec.GetSource().Helm)
        }
    default:
        errors.CheckError(fmt.Errorf("unknown output format: %s", output))
@@ -317,7 +317,7 @@ func printApplicationSetTable(apps []arogappsetv1.ApplicationSet, output *string
            conditions,
        }
        if *output == "wide" {
            vals = append(vals, app.Spec.Template.Spec.Source.RepoURL, app.Spec.Template.Spec.Source.Path, app.Spec.Template.Spec.Source.TargetRevision)
            vals = append(vals, app.Spec.Template.Spec.GetSource().RepoURL, app.Spec.Template.Spec.GetSource().Path, app.Spec.Template.Spec.GetSource().TargetRevision)
        }
        _, _ = fmt.Fprintf(w, fmtStr, vals...)
    }
@@ -333,14 +333,15 @@ func getServerForAppSet(appSet *arogappsetv1.ApplicationSet) string {
}

func printAppSetSummaryTable(appSet *arogappsetv1.ApplicationSet) {
    source := appSet.Spec.Template.Spec.GetSource()
    fmt.Printf(printOpFmtStr, "Name:", appSet.Name)
    fmt.Printf(printOpFmtStr, "Project:", appSet.Spec.Template.Spec.GetProject())
    fmt.Printf(printOpFmtStr, "Server:", getServerForAppSet(appSet))
    fmt.Printf(printOpFmtStr, "Namespace:", appSet.Spec.Template.Spec.Destination.Namespace)
    fmt.Printf(printOpFmtStr, "Repo:", appSet.Spec.Template.Spec.Source.RepoURL)
    fmt.Printf(printOpFmtStr, "Target:", appSet.Spec.Template.Spec.Source.TargetRevision)
    fmt.Printf(printOpFmtStr, "Path:", appSet.Spec.Template.Spec.Source.Path)
    printAppSourceDetails(&appSet.Spec.Template.Spec.Source)
    fmt.Printf(printOpFmtStr, "Repo:", source.RepoURL)
    fmt.Printf(printOpFmtStr, "Target:", source.TargetRevision)
    fmt.Printf(printOpFmtStr, "Path:", source.Path)
    printAppSourceDetails(&source)

    var syncPolicy string
    if appSet.Spec.SyncPolicy != nil && appSet.Spec.Template.Spec.SyncPolicy.Automated != nil {
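These call sites switch from reading Spec.Template.Spec.Source directly to the GetSource() accessor. A minimal sketch of the nil-safe getter pattern they now rely on, assuming a pointer-valued Source field (the real accessor lives in the v1alpha1 API types and may differ):

    // Hypothetical illustration of a nil-safe accessor.
    func (spec ApplicationSpec) GetSource() ApplicationSource {
        if spec.Source == nil {
            return ApplicationSource{}
        }
        return *spec.Source
    }

Callers such as printAppSetSummaryTable can then read source.RepoURL, source.TargetRevision and source.Path without checking the pointer first.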
@@ -27,6 +27,17 @@ import (
    "github.com/argoproj/argo-cd/v2/util/text/label"
)

const (
    // type of the cluster ID is 'name'
    clusterIdTypeName = "name"
    // cluster field is 'name'
    clusterFieldName = "name"
    // cluster field is 'namespaces'
    clusterFieldNamespaces = "namespaces"
    // indicates managing all namespaces
    allNamespaces = "*"
)

// NewClusterCommand returns a new instance of an `argocd cluster` command
func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientcmd.PathOptions) *cobra.Command {
    var command = &cobra.Command{
@@ -47,7 +58,10 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc

  # Remove a target cluster context from ArgoCD
  argocd cluster rm example-cluster
`,

  # Set a target cluster context from ArgoCD
  argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace '*'
  argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace namespace-one --namespace namespace-two`,
    }

    command.AddCommand(NewClusterAddCommand(clientOpts, pathOpts))
@@ -55,6 +69,7 @@ func NewClusterCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clientc
    command.AddCommand(NewClusterListCommand(clientOpts))
    command.AddCommand(NewClusterRemoveCommand(clientOpts, pathOpts))
    command.AddCommand(NewClusterRotateAuthCommand(clientOpts))
    command.AddCommand(NewClusterSetCommand(clientOpts))
    return command
}
@@ -185,6 +200,72 @@ func getRestConfig(pathOpts *clientcmd.PathOptions, ctxName string) (*rest.Confi
    return conf, nil
}

// NewClusterSetCommand returns a new instance of an `argocd cluster set` command
func NewClusterSetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
    var (
        clusterOptions cmdutil.ClusterOptions
        clusterName    string
    )
    var command = &cobra.Command{
        Use:   "set NAME",
        Short: "Set cluster information",
        Example: `  # Set cluster information
  argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace '*'
  argocd cluster set CLUSTER_NAME --name new-cluster-name --namespace namespace-one --namespace namespace-two`,
        Run: func(c *cobra.Command, args []string) {
            ctx := c.Context()
            if len(args) != 1 {
                c.HelpFunc()(c, args)
                os.Exit(1)
            }
            // name of the cluster whose fields have to be updated
            clusterName = args[0]
            conn, clusterIf := headless.NewClientOrDie(clientOpts, c).NewClusterClientOrDie()
            defer io.Close(conn)
            // check which fields need to be updated
            updatedFields := checkFieldsToUpdate(clusterOptions)
            namespaces := clusterOptions.Namespaces
            // check if all namespaces have to be considered
            if len(namespaces) == 1 && strings.EqualFold(namespaces[0], allNamespaces) {
                namespaces[0] = ""
            }
            if updatedFields != nil {
                clusterUpdateRequest := clusterpkg.ClusterUpdateRequest{
                    Cluster: &argoappv1.Cluster{
                        Name:       clusterOptions.Name,
                        Namespaces: namespaces,
                    },
                    UpdatedFields: updatedFields,
                    Id: &clusterpkg.ClusterID{
                        Type:  clusterIdTypeName,
                        Value: clusterName,
                    },
                }
                _, err := clusterIf.Update(ctx, &clusterUpdateRequest)
                errors.CheckError(err)
                fmt.Printf("Cluster '%s' updated.\n", clusterName)
            } else {
                fmt.Print("Specify the cluster field to be updated.\n")
            }
        },
    }
    command.Flags().StringVar(&clusterOptions.Name, "name", "", "Overwrite the cluster name")
    command.Flags().StringArrayVar(&clusterOptions.Namespaces, "namespace", nil, "List of namespaces which are allowed to manage. Specify '*' to manage all namespaces")
    return command
}

// checkFieldsToUpdate returns the fields that need to be updated
func checkFieldsToUpdate(clusterOptions cmdutil.ClusterOptions) []string {
    var updatedFields []string
    if clusterOptions.Name != "" {
        updatedFields = append(updatedFields, clusterFieldName)
    }
    if clusterOptions.Namespaces != nil {
        updatedFields = append(updatedFields, clusterFieldNamespaces)
    }
    return updatedFields
}
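A quick, hypothetical illustration of the update-mask behaviour above (not part of the diff): only the flags the user actually set end up in the UpdatedFields mask, and an empty mask means no update request is sent.

    opts := cmdutil.ClusterOptions{Name: "new-cluster-name", Namespaces: []string{"ns-one", "ns-two"}}
    fmt.Println(checkFieldsToUpdate(opts))                     // [name namespaces]
    fmt.Println(checkFieldsToUpdate(cmdutil.ClusterOptions{})) // [] -> the command prints a hint instead of calling Update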
|
||||
|
||||
// NewClusterGetCommand returns a new instance of an `argocd cluster get` command
|
||||
func NewClusterGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
|
||||
@@ -146,6 +146,7 @@ __argocd_custom_func() {
|
||||
;;
|
||||
argocd_cluster_get | \
|
||||
argocd_cluster_rm | \
|
||||
argocd_cluster_set | \
|
||||
argocd_login | \
|
||||
argocd_cluster_add)
|
||||
__argocd_list_servers
|
||||
|
||||
@@ -202,17 +202,18 @@ func StartLocalServer(ctx context.Context, clientOpts *apiclient.ClientOptions,
|
||||
}
|
||||
appstateCache := appstatecache.NewCache(cache.NewCache(&forwardCacheClient{namespace: namespace, context: ctxStr}), time.Hour)
|
||||
srv := server.NewServer(ctx, server.ArgoCDServerOpts{
|
||||
EnableGZip: false,
|
||||
Namespace: namespace,
|
||||
ListenPort: *port,
|
||||
AppClientset: appClientset,
|
||||
DisableAuth: true,
|
||||
RedisClient: redis.NewClient(&redis.Options{Addr: mr.Addr()}),
|
||||
Cache: servercache.NewCache(appstateCache, 0, 0, 0),
|
||||
KubeClientset: kubeClientset,
|
||||
Insecure: true,
|
||||
ListenHost: *address,
|
||||
RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr},
|
||||
EnableGZip: false,
|
||||
Namespace: namespace,
|
||||
ListenPort: *port,
|
||||
AppClientset: appClientset,
|
||||
DisableAuth: true,
|
||||
RedisClient: redis.NewClient(&redis.Options{Addr: mr.Addr()}),
|
||||
Cache: servercache.NewCache(appstateCache, 0, 0, 0),
|
||||
KubeClientset: kubeClientset,
|
||||
Insecure: true,
|
||||
ListenHost: *address,
|
||||
RepoClientset: &forwardRepoClientset{namespace: namespace, context: ctxStr},
|
||||
EnableProxyExtension: false,
|
||||
})
|
||||
srv.Init(ctx)
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc"
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/skratchdot/open-golang/open"
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/coreos/go-oidc"
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
|
||||
@@ -70,6 +70,9 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
|
||||
# Add a private Git repository on GitHub Enterprise via GitHub App
|
||||
argocd repo add https://ghe.example.com/repos/repo --github-app-id 1 --github-app-installation-id 2 --github-app-private-key-path test.private-key.pem --github-app-enterprise-base-url https://ghe.example.com/api/v3
|
||||
|
||||
# Add a private Git repository on Google Cloud Sources via GCP service account credentials
|
||||
argocd repo add https://source.developers.google.com/p/my-google-cloud-project/r/my-repo --gcp-service-account-key-path service-account-key.json
|
||||
`
|
||||
|
||||
var command = &cobra.Command{
|
||||
@@ -135,6 +138,17 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
}
|
||||
}
|
||||
|
||||
if repoOpts.GCPServiceAccountKeyPath != "" {
|
||||
if git.IsHTTPSURL(repoOpts.Repo.Repo) {
|
||||
gcpServiceAccountKey, err := os.ReadFile(repoOpts.GCPServiceAccountKeyPath)
|
||||
errors.CheckError(err)
|
||||
repoOpts.Repo.GCPServiceAccountKey = string(gcpServiceAccountKey)
|
||||
} else {
|
||||
err := fmt.Errorf("--gcp-service-account-key-path is only supported for HTTPS repositories")
|
||||
errors.CheckError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set repository connection properties only when creating repository, not
|
||||
// when creating repository credentials.
|
||||
// InsecureIgnoreHostKey is deprecated and only here for backwards compat
|
||||
@@ -146,6 +160,7 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
repoOpts.Repo.GithubAppInstallationId = repoOpts.GithubAppInstallationId
|
||||
repoOpts.Repo.GitHubAppEnterpriseBaseURL = repoOpts.GitHubAppEnterpriseBaseURL
|
||||
repoOpts.Repo.Proxy = repoOpts.Proxy
|
||||
repoOpts.Repo.ForceHttpBasicAuth = repoOpts.ForceHttpBasicAuth
|
||||
|
||||
if repoOpts.Repo.Type == "helm" && repoOpts.Repo.Name == "" {
|
||||
errors.CheckError(fmt.Errorf("Must specify --name for repos of type 'helm'"))
|
||||
@@ -184,6 +199,8 @@ func NewRepoAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
GithubAppEnterpriseBaseUrl: repoOpts.Repo.GitHubAppEnterpriseBaseURL,
|
||||
Proxy: repoOpts.Proxy,
|
||||
Project: repoOpts.Repo.Project,
|
||||
GcpServiceAccountKey: repoOpts.Repo.GCPServiceAccountKey,
|
||||
ForceHttpBasicAuth: repoOpts.Repo.ForceHttpBasicAuth,
|
||||
}
|
||||
_, err := repoIf.ValidateAccess(ctx, &repoAccessReq)
|
||||
errors.CheckError(err)
|
||||
@@ -294,7 +311,7 @@ func NewRepoListCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
},
|
||||
}
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
|
||||
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status")
|
||||
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status , must be one of: 'hard'")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -345,6 +362,6 @@ func NewRepoGetCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
},
|
||||
}
|
||||
command.Flags().StringVarP(&output, "output", "o", "wide", "Output format. One of: json|yaml|wide|url")
|
||||
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status")
|
||||
command.Flags().StringVar(&refresh, "refresh", "", "Force a cache refresh on connection status , must be one of: 'hard'")
|
||||
return command
|
||||
}
|
||||
|
||||
@@ -39,12 +39,13 @@ func NewRepoCredsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command
|
||||
// NewRepoCredsAddCommand returns a new instance of an `argocd repocreds add` command
|
||||
func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Command {
|
||||
var (
|
||||
repo appsv1.RepoCreds
|
||||
upsert bool
|
||||
sshPrivateKeyPath string
|
||||
tlsClientCertPath string
|
||||
tlsClientCertKeyPath string
|
||||
githubAppPrivateKeyPath string
|
||||
repo appsv1.RepoCreds
|
||||
upsert bool
|
||||
sshPrivateKeyPath string
|
||||
tlsClientCertPath string
|
||||
tlsClientCertKeyPath string
|
||||
githubAppPrivateKeyPath string
|
||||
gcpServiceAccountKeyPath string
|
||||
)
|
||||
|
||||
// For better readability and easier formatting
|
||||
@@ -62,6 +63,9 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
|
||||
# Add credentials with helm oci registry so that these oci registry urls do not need to be added as repos individually.
|
||||
argocd repocreds add localhost:5000/myrepo --enable-oci --type helm
|
||||
|
||||
# Add credentials with GCP credentials for all repositories under https://source.developers.google.com/p/my-google-cloud-project/r/
|
||||
argocd repocreds add https://source.developers.google.com/p/my-google-cloud-project/r/ --gcp-service-account-key-path service-account-key.json
|
||||
`
|
||||
|
||||
var command = &cobra.Command{
|
||||
@@ -127,6 +131,18 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
}
|
||||
}
|
||||
|
||||
// Specifying gcpServiceAccountKeyPath is only valid for HTTPS repositories
|
||||
if gcpServiceAccountKeyPath != "" {
|
||||
if git.IsHTTPSURL(repo.URL) {
|
||||
gcpServiceAccountKey, err := os.ReadFile(gcpServiceAccountKeyPath)
|
||||
errors.CheckError(err)
|
||||
repo.GCPServiceAccountKey = string(gcpServiceAccountKey)
|
||||
} else {
|
||||
err := fmt.Errorf("--gcp-service-account-key-path is only supported for HTTPS repositories")
|
||||
errors.CheckError(err)
|
||||
}
|
||||
}
|
||||
|
||||
conn, repoIf := headless.NewClientOrDie(clientOpts, c).NewRepoCredsClientOrDie()
|
||||
defer io.Close(conn)
|
||||
|
||||
@@ -158,6 +174,8 @@ func NewRepoCredsAddCommand(clientOpts *argocdclient.ClientOptions) *cobra.Comma
|
||||
command.Flags().BoolVar(&upsert, "upsert", false, "Override an existing repository with the same name even if the spec differs")
|
||||
command.Flags().BoolVar(&repo.EnableOCI, "enable-oci", false, "Specifies whether helm-oci support should be enabled for this repo")
|
||||
command.Flags().StringVar(&repo.Type, "type", common.DefaultRepoType, "type of the repository, \"git\" or \"helm\"")
|
||||
command.Flags().StringVar(&gcpServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
|
||||
command.Flags().BoolVar(&repo.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force basic auth when connecting via HTTP")
|
||||
return command
|
||||
}
|
||||
|
||||
|
||||
@@ -138,22 +138,26 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
}
|
||||
flags.Visit(func(f *pflag.Flag) {
|
||||
visited++
|
||||
source := spec.GetSourcePtr()
|
||||
if source == nil {
|
||||
source = &argoappv1.ApplicationSource{}
|
||||
}
|
||||
switch f.Name {
|
||||
case "repo":
|
||||
spec.Source.RepoURL = appOpts.repoURL
|
||||
source.RepoURL = appOpts.repoURL
|
||||
case "path":
|
||||
spec.Source.Path = appOpts.appPath
|
||||
source.Path = appOpts.appPath
|
||||
case "helm-chart":
|
||||
spec.Source.Chart = appOpts.chart
|
||||
source.Chart = appOpts.chart
|
||||
case "revision":
|
||||
spec.Source.TargetRevision = appOpts.revision
|
||||
source.TargetRevision = appOpts.revision
|
||||
case "revision-history-limit":
|
||||
i := int64(appOpts.revisionHistoryLimit)
|
||||
spec.RevisionHistoryLimit = &i
|
||||
case "values":
|
||||
setHelmOpt(&spec.Source, helmOpts{valueFiles: appOpts.valuesFiles})
|
||||
setHelmOpt(source, helmOpts{valueFiles: appOpts.valuesFiles})
|
||||
case "ignore-missing-value-files":
|
||||
setHelmOpt(&spec.Source, helmOpts{ignoreMissingValueFiles: appOpts.ignoreMissingValueFiles})
|
||||
setHelmOpt(source, helmOpts{ignoreMissingValueFiles: appOpts.ignoreMissingValueFiles})
|
||||
case "values-literal-file":
|
||||
var data []byte
|
||||
|
||||
@@ -165,41 +169,41 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
data, err = config.ReadRemoteFile(appOpts.values)
|
||||
}
|
||||
errors.CheckError(err)
|
||||
setHelmOpt(&spec.Source, helmOpts{values: string(data)})
|
||||
setHelmOpt(source, helmOpts{values: string(data)})
|
||||
case "release-name":
|
||||
setHelmOpt(&spec.Source, helmOpts{releaseName: appOpts.releaseName})
|
||||
setHelmOpt(source, helmOpts{releaseName: appOpts.releaseName})
|
||||
case "helm-version":
|
||||
setHelmOpt(&spec.Source, helmOpts{version: appOpts.helmVersion})
|
||||
setHelmOpt(source, helmOpts{version: appOpts.helmVersion})
|
||||
case "helm-pass-credentials":
|
||||
setHelmOpt(&spec.Source, helmOpts{passCredentials: appOpts.helmPassCredentials})
|
||||
setHelmOpt(source, helmOpts{passCredentials: appOpts.helmPassCredentials})
|
||||
case "helm-set":
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSets: appOpts.helmSets})
|
||||
setHelmOpt(source, helmOpts{helmSets: appOpts.helmSets})
|
||||
case "helm-set-string":
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSetStrings: appOpts.helmSetStrings})
|
||||
setHelmOpt(source, helmOpts{helmSetStrings: appOpts.helmSetStrings})
|
||||
case "helm-set-file":
|
||||
setHelmOpt(&spec.Source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
|
||||
setHelmOpt(source, helmOpts{helmSetFiles: appOpts.helmSetFiles})
|
||||
case "helm-skip-crds":
|
||||
setHelmOpt(&spec.Source, helmOpts{skipCrds: appOpts.helmSkipCrds})
|
||||
setHelmOpt(source, helmOpts{skipCrds: appOpts.helmSkipCrds})
|
||||
case "directory-recurse":
|
||||
if spec.Source.Directory != nil {
|
||||
spec.Source.Directory.Recurse = appOpts.directoryRecurse
|
||||
if source.Directory != nil {
|
||||
source.Directory.Recurse = appOpts.directoryRecurse
|
||||
} else {
|
||||
spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse}
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Recurse: appOpts.directoryRecurse}
|
||||
}
|
||||
case "directory-exclude":
|
||||
if spec.Source.Directory != nil {
|
||||
spec.Source.Directory.Exclude = appOpts.directoryExclude
|
||||
if source.Directory != nil {
|
||||
source.Directory.Exclude = appOpts.directoryExclude
|
||||
} else {
|
||||
spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Exclude: appOpts.directoryExclude}
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Exclude: appOpts.directoryExclude}
|
||||
}
|
||||
case "directory-include":
|
||||
if spec.Source.Directory != nil {
|
||||
spec.Source.Directory.Include = appOpts.directoryInclude
|
||||
if source.Directory != nil {
|
||||
source.Directory.Include = appOpts.directoryInclude
|
||||
} else {
|
||||
spec.Source.Directory = &argoappv1.ApplicationSourceDirectory{Include: appOpts.directoryInclude}
|
||||
source.Directory = &argoappv1.ApplicationSourceDirectory{Include: appOpts.directoryInclude}
|
||||
}
|
||||
case "config-management-plugin":
|
||||
spec.Source.Plugin = &argoappv1.ApplicationSourcePlugin{Name: appOpts.configManagementPlugin}
|
||||
source.Plugin = &argoappv1.ApplicationSourcePlugin{Name: appOpts.configManagementPlugin}
|
||||
case "dest-name":
|
||||
spec.Destination.Name = appOpts.destName
|
||||
case "dest-server":
|
||||
@@ -209,37 +213,37 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
case "project":
|
||||
spec.Project = appOpts.project
|
||||
case "nameprefix":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{namePrefix: appOpts.namePrefix})
|
||||
setKustomizeOpt(source, kustomizeOpts{namePrefix: appOpts.namePrefix})
|
||||
case "namesuffix":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{nameSuffix: appOpts.nameSuffix})
|
||||
setKustomizeOpt(source, kustomizeOpts{nameSuffix: appOpts.nameSuffix})
|
||||
case "kustomize-image":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{images: appOpts.kustomizeImages})
|
||||
setKustomizeOpt(source, kustomizeOpts{images: appOpts.kustomizeImages})
|
||||
case "kustomize-version":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{version: appOpts.kustomizeVersion})
|
||||
setKustomizeOpt(source, kustomizeOpts{version: appOpts.kustomizeVersion})
|
||||
case "kustomize-common-label":
|
||||
parsedLabels, err := label.Parse(appOpts.kustomizeCommonLabels)
|
||||
errors.CheckError(err)
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{commonLabels: parsedLabels})
|
||||
setKustomizeOpt(source, kustomizeOpts{commonLabels: parsedLabels})
|
||||
case "kustomize-common-annotation":
|
||||
parsedAnnotations, err := label.Parse(appOpts.kustomizeCommonAnnotations)
|
||||
errors.CheckError(err)
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{commonAnnotations: parsedAnnotations})
|
||||
setKustomizeOpt(source, kustomizeOpts{commonAnnotations: parsedAnnotations})
|
||||
case "kustomize-force-common-label":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{forceCommonLabels: appOpts.kustomizeForceCommonLabels})
|
||||
setKustomizeOpt(source, kustomizeOpts{forceCommonLabels: appOpts.kustomizeForceCommonLabels})
|
||||
case "kustomize-force-common-annotation":
|
||||
setKustomizeOpt(&spec.Source, kustomizeOpts{forceCommonAnnotations: appOpts.kustomizeForceCommonAnnotations})
|
||||
setKustomizeOpt(source, kustomizeOpts{forceCommonAnnotations: appOpts.kustomizeForceCommonAnnotations})
|
||||
case "jsonnet-tla-str":
|
||||
setJsonnetOpt(&spec.Source, appOpts.jsonnetTlaStr, false)
|
||||
setJsonnetOpt(source, appOpts.jsonnetTlaStr, false)
|
||||
case "jsonnet-tla-code":
|
||||
setJsonnetOpt(&spec.Source, appOpts.jsonnetTlaCode, true)
|
||||
setJsonnetOpt(source, appOpts.jsonnetTlaCode, true)
|
||||
case "jsonnet-ext-var-str":
|
||||
setJsonnetOptExtVar(&spec.Source, appOpts.jsonnetExtVarStr, false)
|
||||
setJsonnetOptExtVar(source, appOpts.jsonnetExtVarStr, false)
|
||||
case "jsonnet-ext-var-code":
|
||||
setJsonnetOptExtVar(&spec.Source, appOpts.jsonnetExtVarCode, true)
|
||||
setJsonnetOptExtVar(source, appOpts.jsonnetExtVarCode, true)
|
||||
case "jsonnet-libs":
|
||||
setJsonnetOptLibs(&spec.Source, appOpts.jsonnetLibs)
|
||||
setJsonnetOptLibs(source, appOpts.jsonnetLibs)
|
||||
case "plugin-env":
|
||||
setPluginOptEnvs(&spec.Source, appOpts.pluginEnvs)
|
||||
setPluginOptEnvs(source, appOpts.pluginEnvs)
|
||||
case "sync-policy":
|
||||
switch appOpts.syncPolicy {
|
||||
case "none":
|
||||
@@ -296,6 +300,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
|
||||
log.Fatalf("Invalid sync-retry-limit [%d]", appOpts.retryLimit)
|
||||
}
|
||||
}
|
||||
spec.Source = source
|
||||
})
|
||||
if flags.Changed("auto-prune") {
|
||||
if spec.SyncPolicy == nil || spec.SyncPolicy.Automated == nil {
|
||||
@@ -473,8 +478,9 @@ func SetParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
if len(parameters) == 0 {
|
||||
return
|
||||
}
|
||||
source := app.Spec.GetSource()
|
||||
var sourceType argoappv1.ApplicationSourceType
|
||||
if st, _ := app.Spec.Source.ExplicitType(); st != nil {
|
||||
if st, _ := source.ExplicitType(); st != nil {
|
||||
sourceType = *st
|
||||
} else if app.Status.SourceType != "" {
|
||||
sourceType = app.Status.SourceType
|
||||
@@ -486,8 +492,8 @@ func SetParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
|
||||
switch sourceType {
|
||||
case argoappv1.ApplicationSourceTypeHelm:
|
||||
if app.Spec.Source.Helm == nil {
|
||||
app.Spec.Source.Helm = &argoappv1.ApplicationSourceHelm{}
|
||||
if source.Helm == nil {
|
||||
source.Helm = &argoappv1.ApplicationSourceHelm{}
|
||||
}
|
||||
for _, p := range parameters {
|
||||
newParam, err := argoappv1.NewHelmParameter(p, false)
|
||||
@@ -495,7 +501,7 @@ func SetParameterOverrides(app *argoappv1.Application, parameters []string) {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
app.Spec.Source.Helm.AddParameter(*newParam)
|
||||
source.Helm.AddParameter(*newParam)
|
||||
}
|
||||
default:
|
||||
log.Fatalf("Parameters can only be set against Helm applications")
|
||||
@@ -580,6 +586,9 @@ func constructAppsBaseOnName(appName string, labels, annotations, args []string,
|
||||
Name: appName,
|
||||
Namespace: appNs,
|
||||
},
|
||||
Spec: argoappv1.ApplicationSpec{
|
||||
Source: &argoappv1.ApplicationSource{},
|
||||
},
|
||||
}
|
||||
SetAppSpecOptions(flags, &app.Spec, &appOpts)
|
||||
SetParameterOverrides(app, appOpts.Parameters)
|
||||
|
||||
@@ -149,7 +149,9 @@ func (f *appOptionsFixture) SetFlag(key, value string) error {
|
||||
|
||||
func newAppOptionsFixture() *appOptionsFixture {
|
||||
fixture := &appOptionsFixture{
|
||||
spec: &v1alpha1.ApplicationSpec{},
|
||||
spec: &v1alpha1.ApplicationSpec{
|
||||
Source: &v1alpha1.ApplicationSource{},
|
||||
},
|
||||
command: &cobra.Command{},
|
||||
options: &AppOptions{},
|
||||
}
|
||||
|
||||
@@ -61,6 +61,6 @@ func readAppset(yml []byte, appsets *[]*argoprojiov1alpha1.ApplicationSet) error
|
||||
*appsets = append(*appsets, &appset)
|
||||
|
||||
}
|
||||
|
||||
return fmt.Errorf("error reading app set: %w", err)
|
||||
// we reach here if there is no error found while reading the Application Set
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -22,6 +22,8 @@ type RepoOptions struct {
|
||||
GithubAppPrivateKeyPath string
|
||||
GitHubAppEnterpriseBaseURL string
|
||||
Proxy string
|
||||
GCPServiceAccountKeyPath string
|
||||
ForceHttpBasicAuth bool
|
||||
}
|
||||
|
||||
func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
|
||||
@@ -42,4 +44,6 @@ func AddRepoFlags(command *cobra.Command, opts *RepoOptions) {
|
||||
command.Flags().StringVar(&opts.GithubAppPrivateKeyPath, "github-app-private-key-path", "", "private key of the GitHub Application")
|
||||
command.Flags().StringVar(&opts.GitHubAppEnterpriseBaseURL, "github-app-enterprise-base-url", "", "base url to use when using GitHub Enterprise (e.g. https://ghe.example.com/api/v3")
|
||||
command.Flags().StringVar(&opts.Proxy, "proxy", "", "use proxy to access repository")
|
||||
command.Flags().StringVar(&opts.GCPServiceAccountKeyPath, "gcp-service-account-key-path", "", "service account key for the Google Cloud Platform")
|
||||
command.Flags().BoolVar(&opts.ForceHttpBasicAuth, "force-http-basic-auth", false, "whether to force use of basic auth when connecting repository via HTTP")
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package apiclient
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
apiclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
@@ -317,6 +318,7 @@ func (m *ManifestResponse) GetSourceType() string {
|
||||
|
||||
type RepositoryResponse struct {
|
||||
IsSupported bool `protobuf:"varint,1,opt,name=isSupported,proto3" json:"isSupported,omitempty"`
|
||||
IsDiscoveryEnabled bool `protobuf:"varint,2,opt,name=isDiscoveryEnabled,proto3" json:"isDiscoveryEnabled,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
@@ -362,6 +364,62 @@ func (m *RepositoryResponse) GetIsSupported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RepositoryResponse) GetIsDiscoveryEnabled() bool {
|
||||
if m != nil {
|
||||
return m.IsDiscoveryEnabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ParametersAnnouncementResponse contains a list of announcements. This list represents all the parameters which a CMP
|
||||
// is able to accept.
|
||||
type ParametersAnnouncementResponse struct {
|
||||
ParameterAnnouncements []*apiclient.ParameterAnnouncement `protobuf:"bytes,1,rep,name=parameterAnnouncements,proto3" json:"parameterAnnouncements,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) Reset() { *m = ParametersAnnouncementResponse{} }
|
||||
func (m *ParametersAnnouncementResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ParametersAnnouncementResponse) ProtoMessage() {}
|
||||
func (*ParametersAnnouncementResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b21875a7079a06ed, []int{5}
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ParametersAnnouncementResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ParametersAnnouncementResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ParametersAnnouncementResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ParametersAnnouncementResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ParametersAnnouncementResponse) GetParameterAnnouncements() []*apiclient.ParameterAnnouncement {
|
||||
if m != nil {
|
||||
return m.ParameterAnnouncements
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
@@ -373,7 +431,7 @@ func (m *File) Reset() { *m = File{} }
|
||||
func (m *File) String() string { return proto.CompactTextString(m) }
|
||||
func (*File) ProtoMessage() {}
|
||||
func (*File) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b21875a7079a06ed, []int{5}
|
||||
return fileDescriptor_b21875a7079a06ed, []int{6}
|
||||
}
|
||||
func (m *File) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -415,44 +473,50 @@ func init() {
|
||||
proto.RegisterType((*EnvEntry)(nil), "plugin.EnvEntry")
|
||||
proto.RegisterType((*ManifestResponse)(nil), "plugin.ManifestResponse")
|
||||
proto.RegisterType((*RepositoryResponse)(nil), "plugin.RepositoryResponse")
|
||||
proto.RegisterType((*ParametersAnnouncementResponse)(nil), "plugin.ParametersAnnouncementResponse")
|
||||
proto.RegisterType((*File)(nil), "plugin.File")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("cmpserver/plugin/plugin.proto", fileDescriptor_b21875a7079a06ed) }
|
||||
|
||||
var fileDescriptor_b21875a7079a06ed = []byte{
|
||||
// 483 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xd1, 0x8e, 0x12, 0x3d,
|
||||
0x14, 0xa6, 0x3f, 0xec, 0x2e, 0x1c, 0x36, 0xf9, 0x49, 0x63, 0xe2, 0x84, 0xec, 0x22, 0x99, 0x2b,
|
||||
0x6e, 0x84, 0x04, 0x8d, 0x77, 0x26, 0xba, 0x66, 0x75, 0xa3, 0xc1, 0x90, 0xe2, 0x95, 0x77, 0xdd,
|
||||
0x72, 0x80, 0xba, 0x33, 0x6d, 0x6d, 0x3b, 0x93, 0xe0, 0x95, 0x6f, 0xe3, 0x2b, 0xf8, 0x08, 0x5e,
|
||||
0xfa, 0x08, 0x86, 0x27, 0x31, 0x53, 0x66, 0x18, 0xe2, 0x46, 0xaf, 0x38, 0xdf, 0x77, 0xce, 0xf9,
|
||||
0xf8, 0xbe, 0x4e, 0x0b, 0x97, 0x22, 0x35, 0x0e, 0x6d, 0x8e, 0x76, 0x62, 0x92, 0x6c, 0x2d, 0x55,
|
||||
0xf9, 0x33, 0x36, 0x56, 0x7b, 0x4d, 0x4f, 0xf7, 0x28, 0xfe, 0x4a, 0xa0, 0xf7, 0xd2, 0x98, 0x85,
|
||||
0xb7, 0xc8, 0x53, 0x86, 0x9f, 0x33, 0x74, 0x9e, 0x3e, 0x87, 0x76, 0x8a, 0x9e, 0x2f, 0xb9, 0xe7,
|
||||
0x11, 0x19, 0x92, 0x51, 0x77, 0xfa, 0x68, 0x5c, 0x6e, 0xcf, 0xb8, 0x92, 0x2b, 0x74, 0xbe, 0x1c,
|
||||
0x9d, 0x95, 0x63, 0x37, 0x0d, 0x76, 0x58, 0xa1, 0x31, 0xb4, 0x56, 0x32, 0xc1, 0xe8, 0xbf, 0xb0,
|
||||
0x7a, 0x5e, 0xad, 0xbe, 0x96, 0x09, 0xde, 0x34, 0x58, 0xe8, 0x5d, 0x75, 0xe0, 0xcc, 0xee, 0x25,
|
||||
0xe2, 0x6f, 0x04, 0x1e, 0xfe, 0x45, 0x96, 0x46, 0x70, 0xc6, 0x8d, 0x79, 0xcf, 0x53, 0x0c, 0x46,
|
||||
0x3a, 0xac, 0x82, 0x74, 0x00, 0xc0, 0x8d, 0x61, 0x98, 0xcc, 0xb9, 0xdf, 0x84, 0xbf, 0xea, 0xb0,
|
||||
0x23, 0x86, 0xf6, 0xa1, 0x2d, 0x36, 0x28, 0xee, 0x5c, 0x96, 0x46, 0xcd, 0xd0, 0x3d, 0x60, 0x4a,
|
||||
0xa1, 0xe5, 0xe4, 0x17, 0x8c, 0x5a, 0x43, 0x32, 0x6a, 0xb2, 0x50, 0xd3, 0x18, 0x9a, 0xa8, 0xf2,
|
||||
0xe8, 0x64, 0xd8, 0x1c, 0x75, 0xa7, 0xbd, 0xca, 0xf3, 0xb5, 0xca, 0xaf, 0x95, 0xb7, 0x5b, 0x56,
|
||||
0x34, 0xe3, 0xa7, 0xd0, 0xae, 0x88, 0x42, 0x43, 0xd5, 0xb6, 0x42, 0x4d, 0x1f, 0xc0, 0x49, 0xce,
|
||||
0x93, 0x0c, 0x4b, 0x3b, 0x7b, 0x10, 0xcf, 0xa1, 0x57, 0xc7, 0x73, 0x46, 0x2b, 0x87, 0xf4, 0x02,
|
||||
0x3a, 0x69, 0xc9, 0xb9, 0x88, 0x0c, 0x9b, 0xa3, 0x0e, 0xab, 0x89, 0x22, 0x9b, 0xd3, 0x99, 0x15,
|
||||
0xf8, 0x61, 0x6b, 0x2a, 0xb1, 0x23, 0x26, 0x7e, 0x06, 0x94, 0xa1, 0xd1, 0x4e, 0x7a, 0x6d, 0xb7,
|
||||
0x07, 0xcd, 0x21, 0x74, 0xa5, 0x5b, 0x64, 0xc6, 0x68, 0xeb, 0x71, 0x19, 0x8c, 0xb5, 0xd9, 0x31,
|
||||
0x15, 0x5f, 0x40, 0xab, 0xf8, 0x08, 0x85, 0x4f, 0xb1, 0xc9, 0xd4, 0x5d, 0x98, 0x39, 0x67, 0x7b,
|
||||
0x30, 0xfd, 0x4e, 0xe0, 0xf2, 0x95, 0x56, 0x2b, 0xb9, 0x9e, 0x71, 0xc5, 0xd7, 0x98, 0xa2, 0xf2,
|
||||
0xf3, 0x70, 0x0c, 0x0b, 0xb4, 0xb9, 0x14, 0x48, 0xdf, 0x42, 0xef, 0x0d, 0x2a, 0xb4, 0xdc, 0x63,
|
||||
0x95, 0x88, 0x46, 0xd5, 0x51, 0xfd, 0x79, 0x8b, 0xfa, 0xd1, 0xfd, 0x3b, 0xb3, 0x77, 0x1a, 0x37,
|
||||
0x46, 0x84, 0xbe, 0x83, 0xff, 0x67, 0xdc, 0x8b, 0x4d, 0x1d, 0xe4, 0x1f, 0x52, 0xfd, 0xaa, 0x73,
|
||||
0x3f, 0x76, 0x21, 0x76, 0xf5, 0xe2, 0xc7, 0x6e, 0x40, 0x7e, 0xee, 0x06, 0xe4, 0xd7, 0x6e, 0x40,
|
||||
0x3e, 0x4e, 0xd7, 0xd2, 0x6f, 0xb2, 0xdb, 0xb1, 0xd0, 0xe9, 0x84, 0xdb, 0xb5, 0x36, 0x56, 0x7f,
|
||||
0x0a, 0xc5, 0x63, 0xb1, 0x9c, 0xe4, 0xd3, 0x49, 0xfd, 0x32, 0xb8, 0x91, 0x22, 0x91, 0xa8, 0xfc,
|
||||
0xed, 0x69, 0x78, 0x16, 0x4f, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0x83, 0x01, 0x5e, 0x48, 0x37,
|
||||
0x03, 0x00, 0x00,
|
||||
// 576 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0xdd, 0x6e, 0x12, 0x4f,
|
||||
0x14, 0xc0, 0xbb, 0x85, 0xb6, 0x70, 0x68, 0xf2, 0x27, 0x93, 0x7f, 0x74, 0x25, 0x2d, 0xe2, 0x5e,
|
||||
0x18, 0x6e, 0x84, 0x04, 0xbd, 0x35, 0xb1, 0x55, 0x6c, 0xa3, 0xc1, 0x90, 0xa9, 0x37, 0x7a, 0x37,
|
||||
0x1d, 0x0e, 0x30, 0x76, 0x77, 0x66, 0x9c, 0x99, 0xdd, 0x04, 0xbd, 0xf1, 0x3d, 0x7c, 0x00, 0x5f,
|
||||
0xc5, 0x4b, 0x1f, 0xc1, 0xf4, 0x49, 0x0c, 0xb3, 0xbb, 0x40, 0x6c, 0x8b, 0x57, 0x7b, 0x3e, 0x7f,
|
||||
0x7b, 0xbe, 0x32, 0x70, 0xcc, 0x13, 0x6d, 0xd1, 0x64, 0x68, 0xfa, 0x3a, 0x4e, 0x67, 0x42, 0x16,
|
||||
0x9f, 0x9e, 0x36, 0xca, 0x29, 0xb2, 0x9f, 0x6b, 0xad, 0xe1, 0x4c, 0xb8, 0x79, 0x7a, 0xd9, 0xe3,
|
||||
0x2a, 0xe9, 0x33, 0x33, 0x53, 0xda, 0xa8, 0x4f, 0x5e, 0x78, 0xc2, 0x27, 0xfd, 0x6c, 0xd0, 0x37,
|
||||
0xa8, 0x55, 0x81, 0xf1, 0xa2, 0x70, 0xca, 0x2c, 0x36, 0xc4, 0x1c, 0x17, 0x7d, 0x0b, 0xa0, 0x79,
|
||||
0xa2, 0xf5, 0x85, 0x33, 0xc8, 0x12, 0x8a, 0x9f, 0x53, 0xb4, 0x8e, 0x3c, 0x87, 0x5a, 0x82, 0x8e,
|
||||
0x4d, 0x98, 0x63, 0x61, 0xd0, 0x09, 0xba, 0x8d, 0xc1, 0xc3, 0x5e, 0x51, 0xc4, 0x88, 0x49, 0x31,
|
||||
0x45, 0xeb, 0x8a, 0xd0, 0x51, 0x11, 0x76, 0xbe, 0x43, 0x57, 0x29, 0x24, 0x82, 0xea, 0x54, 0xc4,
|
||||
0x18, 0xee, 0xfa, 0xd4, 0xc3, 0x32, 0xf5, 0xb5, 0x88, 0xf1, 0x7c, 0x87, 0x7a, 0xdf, 0x69, 0x1d,
|
||||
0x0e, 0x4c, 0x8e, 0x88, 0x7e, 0x04, 0x70, 0xff, 0x0e, 0x2c, 0x09, 0xe1, 0x80, 0x69, 0xfd, 0x8e,
|
||||
0x25, 0xe8, 0x0b, 0xa9, 0xd3, 0x52, 0x25, 0x6d, 0x00, 0xa6, 0x35, 0xc5, 0x78, 0xcc, 0xdc, 0xdc,
|
||||
0xff, 0xaa, 0x4e, 0x37, 0x2c, 0xa4, 0x05, 0x35, 0x3e, 0x47, 0x7e, 0x65, 0xd3, 0x24, 0xac, 0x78,
|
||||
0xef, 0x4a, 0x27, 0x04, 0xaa, 0x56, 0x7c, 0xc1, 0xb0, 0xda, 0x09, 0xba, 0x15, 0xea, 0x65, 0x12,
|
||||
0x41, 0x05, 0x65, 0x16, 0xee, 0x75, 0x2a, 0xdd, 0xc6, 0xa0, 0x59, 0xd6, 0x3c, 0x94, 0xd9, 0x50,
|
||||
0x3a, 0xb3, 0xa0, 0x4b, 0x67, 0xf4, 0x0c, 0x6a, 0xa5, 0x61, 0xc9, 0x90, 0xeb, 0xb2, 0xbc, 0x4c,
|
||||
0xfe, 0x87, 0xbd, 0x8c, 0xc5, 0x29, 0x16, 0xe5, 0xe4, 0x4a, 0x34, 0x86, 0xe6, 0xba, 0x3d, 0xab,
|
||||
0x95, 0xb4, 0x48, 0x8e, 0xa0, 0x9e, 0x14, 0x36, 0x1b, 0x06, 0x9d, 0x4a, 0xb7, 0x4e, 0xd7, 0x86,
|
||||
0x65, 0x6f, 0x56, 0xa5, 0x86, 0xe3, 0xfb, 0x85, 0x2e, 0x61, 0x1b, 0x96, 0x68, 0x0a, 0x84, 0xae,
|
||||
0x16, 0xb9, 0x62, 0x76, 0xa0, 0x21, 0xec, 0x45, 0xaa, 0xb5, 0x32, 0x0e, 0x27, 0xbe, 0xb0, 0x1a,
|
||||
0xdd, 0x34, 0x91, 0x1e, 0x10, 0x61, 0x5f, 0x09, 0xcb, 0x55, 0x86, 0x66, 0x31, 0x94, 0xec, 0x32,
|
||||
0xc6, 0x89, 0xe7, 0xd7, 0xe8, 0x2d, 0x9e, 0xe8, 0x2b, 0xb4, 0xc7, 0xcc, 0xb0, 0x04, 0x1d, 0x1a,
|
||||
0x7b, 0x22, 0xa5, 0x4a, 0x25, 0xc7, 0x04, 0xe5, 0xba, 0x8f, 0x0f, 0x70, 0x4f, 0x97, 0x11, 0x9b,
|
||||
0x01, 0x79, 0x53, 0x8d, 0xc1, 0xa3, 0xde, 0xc6, 0xc5, 0x8d, 0x6f, 0x8b, 0xa4, 0x77, 0x00, 0xa2,
|
||||
0x23, 0xa8, 0x2e, 0x2f, 0x66, 0x39, 0x54, 0x3e, 0x4f, 0xe5, 0x95, 0x6f, 0xe8, 0x90, 0xe6, 0xca,
|
||||
0xe0, 0xfb, 0x2e, 0x1c, 0xbf, 0x54, 0x72, 0x2a, 0x66, 0x23, 0x26, 0xd9, 0xcc, 0xe7, 0x8c, 0xfd,
|
||||
0xce, 0x2e, 0xd0, 0x64, 0x82, 0x23, 0x79, 0x03, 0xcd, 0x33, 0x94, 0x68, 0x98, 0xc3, 0x72, 0xfc,
|
||||
0x24, 0x2c, 0xf7, 0xfa, 0xf7, 0xc9, 0xb7, 0xc2, 0x9b, 0x07, 0x9e, 0xb7, 0x18, 0xed, 0x74, 0x03,
|
||||
0xf2, 0x16, 0xfe, 0x1b, 0x31, 0xc7, 0xe7, 0xeb, 0xa9, 0x6f, 0x41, 0xb5, 0x4a, 0xcf, 0xcd, 0x1d,
|
||||
0x79, 0x18, 0x83, 0x07, 0x67, 0xe8, 0x6e, 0x1f, 0xec, 0x16, 0xec, 0xe3, 0xd2, 0xb3, 0x7d, 0x25,
|
||||
0xcb, 0x5f, 0x9c, 0xbe, 0xf8, 0x79, 0xdd, 0x0e, 0x7e, 0x5d, 0xb7, 0x83, 0xdf, 0xd7, 0xed, 0xe0,
|
||||
0xe3, 0xe0, 0x1f, 0x4f, 0xc5, 0xfa, 0xc1, 0x61, 0x5a, 0xf0, 0x58, 0xa0, 0x74, 0x97, 0xfb, 0xfe,
|
||||
0x79, 0x78, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x23, 0x88, 0x8e, 0xd3, 0x8e, 0x04, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -472,6 +536,8 @@ type ConfigManagementPluginServiceClient interface {
|
||||
GenerateManifest(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GenerateManifestClient, error)
|
||||
// MatchRepository returns whether or not the given application is supported by the plugin
|
||||
MatchRepository(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_MatchRepositoryClient, error)
|
||||
// GetParametersAnnouncement gets a list of parameter announcements for the given app
|
||||
GetParametersAnnouncement(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GetParametersAnnouncementClient, error)
|
||||
}
|
||||
|
||||
type configManagementPluginServiceClient struct {
|
||||
@@ -550,6 +616,40 @@ func (x *configManagementPluginServiceMatchRepositoryClient) CloseAndRecv() (*Re
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *configManagementPluginServiceClient) GetParametersAnnouncement(ctx context.Context, opts ...grpc.CallOption) (ConfigManagementPluginService_GetParametersAnnouncementClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_ConfigManagementPluginService_serviceDesc.Streams[2], "/plugin.ConfigManagementPluginService/GetParametersAnnouncement", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &configManagementPluginServiceGetParametersAnnouncementClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type ConfigManagementPluginService_GetParametersAnnouncementClient interface {
|
||||
Send(*AppStreamRequest) error
|
||||
CloseAndRecv() (*ParametersAnnouncementResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type configManagementPluginServiceGetParametersAnnouncementClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *configManagementPluginServiceGetParametersAnnouncementClient) Send(m *AppStreamRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *configManagementPluginServiceGetParametersAnnouncementClient) CloseAndRecv() (*ParametersAnnouncementResponse, error) {
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := new(ParametersAnnouncementResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ConfigManagementPluginServiceServer is the server API for ConfigManagementPluginService service.
|
||||
type ConfigManagementPluginServiceServer interface {
|
||||
// GenerateManifests receive a stream containing a tgz archive with all required files necessary
|
||||
@@ -557,6 +657,8 @@ type ConfigManagementPluginServiceServer interface {
|
||||
GenerateManifest(ConfigManagementPluginService_GenerateManifestServer) error
|
||||
// MatchRepository returns whether or not the given application is supported by the plugin
|
||||
MatchRepository(ConfigManagementPluginService_MatchRepositoryServer) error
|
||||
// GetParametersAnnouncement gets a list of parameter announcements for the given app
|
||||
GetParametersAnnouncement(ConfigManagementPluginService_GetParametersAnnouncementServer) error
|
||||
}
|
||||
|
||||
// UnimplementedConfigManagementPluginServiceServer can be embedded to have forward compatible implementations.
|
||||
@@ -569,6 +671,9 @@ func (*UnimplementedConfigManagementPluginServiceServer) GenerateManifest(srv Co
|
||||
func (*UnimplementedConfigManagementPluginServiceServer) MatchRepository(srv ConfigManagementPluginService_MatchRepositoryServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method MatchRepository not implemented")
|
||||
}
|
||||
func (*UnimplementedConfigManagementPluginServiceServer) GetParametersAnnouncement(srv ConfigManagementPluginService_GetParametersAnnouncementServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method GetParametersAnnouncement not implemented")
|
||||
}
|
||||
|
||||
func RegisterConfigManagementPluginServiceServer(s *grpc.Server, srv ConfigManagementPluginServiceServer) {
|
||||
s.RegisterService(&_ConfigManagementPluginService_serviceDesc, srv)
|
||||
@@ -626,6 +731,32 @@ func (x *configManagementPluginServiceMatchRepositoryServer) Recv() (*AppStreamR
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _ConfigManagementPluginService_GetParametersAnnouncement_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(ConfigManagementPluginServiceServer).GetParametersAnnouncement(&configManagementPluginServiceGetParametersAnnouncementServer{stream})
|
||||
}
|
||||
|
||||
type ConfigManagementPluginService_GetParametersAnnouncementServer interface {
|
||||
SendAndClose(*ParametersAnnouncementResponse) error
|
||||
Recv() (*AppStreamRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type configManagementPluginServiceGetParametersAnnouncementServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *configManagementPluginServiceGetParametersAnnouncementServer) SendAndClose(m *ParametersAnnouncementResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *configManagementPluginServiceGetParametersAnnouncementServer) Recv() (*AppStreamRequest, error) {
|
||||
m := new(AppStreamRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
var _ConfigManagementPluginService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "plugin.ConfigManagementPluginService",
|
||||
HandlerType: (*ConfigManagementPluginServiceServer)(nil),
|
||||
@@ -641,6 +772,11 @@ var _ConfigManagementPluginService_serviceDesc = grpc.ServiceDesc{
|
||||
Handler: _ConfigManagementPluginService_MatchRepository_Handler,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "GetParametersAnnouncement",
|
||||
Handler: _ConfigManagementPluginService_GetParametersAnnouncement_Handler,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "cmpserver/plugin/plugin.proto",
|
||||
}
|
||||
@@ -898,6 +1034,16 @@ func (m *RepositoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.IsDiscoveryEnabled {
|
||||
i--
|
||||
if m.IsDiscoveryEnabled {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.IsSupported {
|
||||
i--
|
||||
if m.IsSupported {
|
||||
@@ -911,6 +1057,47 @@ func (m *RepositoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.ParameterAnnouncements) > 0 {
|
||||
for iNdEx := len(m.ParameterAnnouncements) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.ParameterAnnouncements[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *File) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@@ -1079,6 +1266,27 @@ func (m *RepositoryResponse) Size() (n int) {
|
||||
if m.IsSupported {
|
||||
n += 2
|
||||
}
|
||||
if m.IsDiscoveryEnabled {
|
||||
n += 2
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ParametersAnnouncementResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.ParameterAnnouncements) > 0 {
|
||||
for _, e := range m.ParameterAnnouncements {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
@@ -1707,6 +1915,111 @@ func (m *RepositoryResponse) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
}
|
||||
m.IsSupported = bool(v != 0)
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field IsDiscoveryEnabled", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.IsDiscoveryEnabled = bool(v != 0)
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ParametersAnnouncementResponse) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ParametersAnnouncementResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ParametersAnnouncementResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ParameterAnnouncements", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ParameterAnnouncements = append(m.ParameterAnnouncements, &apiclient.ParameterAnnouncement{})
|
||||
if err := m.ParameterAnnouncements[len(m.ParameterAnnouncements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
configUtil "github.com/argoproj/argo-cd/v2/util/config"
|
||||
)
|
||||
|
||||
@@ -21,10 +22,11 @@ type PluginConfig struct {
|
||||
}
|
||||
|
||||
type PluginConfigSpec struct {
|
||||
Version string `json:"version"`
|
||||
Init Command `json:"init,omitempty"`
|
||||
Generate Command `json:"generate"`
|
||||
Discover Discover `json:"discover"`
|
||||
Version string `json:"version"`
|
||||
Init Command `json:"init,omitempty"`
|
||||
Generate Command `json:"generate"`
|
||||
Discover Discover `json:"discover"`
|
||||
Parameters Parameters `yaml:"parameters"`
|
||||
}
|
||||
|
||||
// Discover holds find and fileName
|
||||
@@ -45,6 +47,17 @@ type Find struct {
|
||||
Glob string `json:"glob"`
|
||||
}
|
||||
|
||||
// Parameters holds static and dynamic configurations
|
||||
type Parameters struct {
|
||||
Static []*apiclient.ParameterAnnouncement `yaml:"static"`
|
||||
Dynamic Command `yaml:"dynamic"`
|
||||
}
|
||||
|
||||
// Dynamic holds the dynamic announcements for CMPs
|
||||
type Dynamic struct {
|
||||
Command
|
||||
}
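For illustration, the new `parameters` section deserializes into the `Parameters` type above: `static` is a fixed list of announcements, while `dynamic` is a command run per application that prints additional announcements. A minimal sketch, with made-up parameter names and command, of an equivalent in-code value (not part of this change):

```go
package plugin

import (
	"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
)

// exampleParameters is illustrative only: roughly what a plugin.yaml
// "parameters" section with one static announcement and one dynamic
// command would deserialize into.
var exampleParameters = Parameters{
	Static: []*apiclient.ParameterAnnouncement{
		{Name: "replicas", String_: "3"}, // hypothetical static parameter
	},
	Dynamic: Command{
		// hypothetical command that prints more announcements as a JSON array
		Command: []string{"sh", "-c"},
		Args:    []string{`echo '[{"name": "image-tag"}]'`},
	},
}
```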
|
||||
|
||||
func ReadPluginConfig(filePath string) (*PluginConfig, error) {
|
||||
path := fmt.Sprintf("%s/%s", strings.TrimRight(filePath, "/"), common.PluginConfigFileName)
|
||||
|
||||
@@ -71,9 +84,7 @@ func ValidatePluginConfig(config PluginConfig) error {
|
||||
if len(config.Spec.Generate.Command) == 0 {
|
||||
return fmt.Errorf("invalid plugin configuration file. spec.generate command should be non-empty")
|
||||
}
|
||||
if config.Spec.Discover.Find.Glob == "" && len(config.Spec.Discover.Find.Command.Command) == 0 && config.Spec.Discover.FileName == "" {
|
||||
return fmt.Errorf("invalid plugin configuration file. atleast one of discover.find.command or discover.find.glob or discover.fineName should be non-empty")
|
||||
}
|
||||
// discovery field is optional as apps can now specify plugin names directly
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package plugin
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -13,7 +14,9 @@ import (
|
||||
|
||||
"github.com/argoproj/pkg/rand"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmpserver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
repoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/util/buffered_context"
|
||||
"github.com/argoproj/argo-cd/v2/util/cmp"
|
||||
"github.com/argoproj/argo-cd/v2/util/io/files"
|
||||
@@ -21,8 +24,6 @@ import (
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
"github.com/mattn/go-zglob"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmpserver/apiclient"
|
||||
)
|
||||
|
||||
// cmpTimeoutBuffer is the amount of time before the request deadline to timeout server-side work. It makes sure there's
|
||||
@@ -45,15 +46,14 @@ func NewService(initConstants CMPServerInitConstants) *Service {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) Init() error {
|
||||
workDir := common.GetCMPWorkDir()
|
||||
func (s *Service) Init(workDir string) error {
|
||||
err := os.RemoveAll(workDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error removing workdir %q: %s", workDir, err)
|
||||
return fmt.Errorf("error removing workdir %q: %w", workDir, err)
|
||||
}
|
||||
err = os.MkdirAll(workDir, 0700)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating workdir %q: %s", workDir, err)
|
||||
return fmt.Errorf("error creating workdir %q: %w", workDir, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -143,24 +143,50 @@ func environ(envVars []*apiclient.EnvEntry) []string {
|
||||
return environ
|
||||
}
|
||||
|
||||
// getTempDirMustCleanup creates a temporary directory and returns a cleanup function.
|
||||
func getTempDirMustCleanup(baseDir string) (workDir string, cleanup func(), err error) {
|
||||
workDir, err = files.CreateTempDir(baseDir)
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("error creating temp dir: %w", err)
|
||||
}
|
||||
cleanup = func() {
|
||||
if err := os.RemoveAll(workDir); err != nil {
|
||||
log.WithFields(map[string]interface{}{
|
||||
common.SecurityField: common.SecurityHigh,
|
||||
common.SecurityCWEField: 459,
|
||||
}).Errorf("Failed to clean up temp directory: %s", err)
|
||||
}
|
||||
}
|
||||
return workDir, cleanup, nil
|
||||
}
|
||||
|
||||
type Stream interface {
|
||||
Recv() (*apiclient.AppStreamRequest, error)
|
||||
Context() context.Context
|
||||
}
|
||||
|
||||
type GenerateManifestStream interface {
|
||||
Stream
|
||||
SendAndClose(response *apiclient.ManifestResponse) error
|
||||
}
|
||||
|
||||
// GenerateManifest runs generate command from plugin config file and returns generated manifest files
|
||||
func (s *Service) GenerateManifest(stream apiclient.ConfigManagementPluginService_GenerateManifestServer) error {
|
||||
return s.generateManifestGeneric(stream)
|
||||
}
|
||||
|
||||
func (s *Service) generateManifestGeneric(stream GenerateManifestStream) error {
|
||||
ctx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer)
|
||||
defer cancel()
|
||||
workDir, err := files.CreateTempDir(common.GetCMPWorkDir())
|
||||
workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating temp dir: %s", err)
|
||||
return fmt.Errorf("error creating workdir for manifest generation: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(workDir); err != nil {
|
||||
// we panic here as the workDir may contain sensitive information
|
||||
panic(fmt.Sprintf("error removing generate manifest workdir: %s", err))
|
||||
}
|
||||
}()
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(ctx, stream, workDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("generate manifest error receiving stream: %s", err)
|
||||
return fmt.Errorf("generate manifest error receiving stream: %w", err)
|
||||
}
|
||||
|
||||
appPath := filepath.Clean(filepath.Join(workDir, metadata.AppRelPath))
|
||||
@@ -169,11 +195,11 @@ func (s *Service) GenerateManifest(stream apiclient.ConfigManagementPluginServic
|
||||
}
|
||||
response, err := s.generateManifest(ctx, appPath, metadata.GetEnv())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error generating manifests: %s", err)
|
||||
return fmt.Errorf("error generating manifests: %w", err)
|
||||
}
|
||||
err = stream.SendAndClose(response)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending manifest response: %s", err)
|
||||
return fmt.Errorf("error sending manifest response: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -203,6 +229,11 @@ func (s *Service) generateManifest(ctx context.Context, appDir string, envEntrie
|
||||
|
||||
manifests, err := kube.SplitYAMLToString([]byte(out))
|
||||
if err != nil {
|
||||
sanitizedManifests := manifests
|
||||
if len(sanitizedManifests) > 1000 {
|
||||
sanitizedManifests = manifests[:1000]
|
||||
}
|
||||
log.Debugf("Failed to split generated manifests. Beginning of generated manifests: %q", sanitizedManifests)
|
||||
return &apiclient.ManifestResponse{}, err
|
||||
}
|
||||
|
||||
@@ -211,58 +242,63 @@ func (s *Service) generateManifest(ctx context.Context, appDir string, envEntrie
|
||||
}, err
|
||||
}
|
||||
|
||||
type MatchRepositoryStream interface {
|
||||
Stream
|
||||
SendAndClose(response *apiclient.RepositoryResponse) error
|
||||
}
|
||||
|
||||
// MatchRepository receives the application stream and checks whether
|
||||
// its repository type is supported by the config management plugin
|
||||
// server.
|
||||
//The checks are implemented in the following order:
|
||||
// 1. If spec.Discover.FileName is provided it finds for a name match in Applications files
|
||||
// 2. If spec.Discover.Find.Glob is provided if finds for a glob match in Applications files
|
||||
// 3. Otherwise it runs the spec.Discover.Find.Command
|
||||
// The checks are implemented in the following order:
|
||||
// 1. If spec.Discover.FileName is provided, it looks for a filename match in the Application's files
|
||||
// 2. If spec.Discover.Find.Glob is provided, it looks for a glob match in the Application's files
|
||||
// 3. Otherwise it runs the spec.Discover.Find.Command
|
||||
func (s *Service) MatchRepository(stream apiclient.ConfigManagementPluginService_MatchRepositoryServer) error {
|
||||
return s.matchRepositoryGeneric(stream)
|
||||
}
|
||||
|
||||
func (s *Service) matchRepositoryGeneric(stream MatchRepositoryStream) error {
|
||||
bufferedCtx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer)
|
||||
defer cancel()
|
||||
|
||||
workDir, err := files.CreateTempDir(common.GetCMPWorkDir())
|
||||
workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating match repository workdir: %s", err)
|
||||
return fmt.Errorf("error creating workdir for repository matching: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(workDir); err != nil {
|
||||
// we panic here as the workDir may contain sensitive information
|
||||
panic(fmt.Sprintf("error removing match repository workdir: %s", err))
|
||||
}
|
||||
}()
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("match repository error receiving stream: %s", err)
|
||||
return fmt.Errorf("match repository error receiving stream: %w", err)
|
||||
}
|
||||
|
||||
isSupported, err := s.matchRepository(bufferedCtx, workDir, metadata.GetEnv())
|
||||
isSupported, isDiscoveryEnabled, err := s.matchRepository(bufferedCtx, workDir, metadata.GetEnv())
|
||||
if err != nil {
|
||||
return fmt.Errorf("match repository error: %s", err)
|
||||
return fmt.Errorf("match repository error: %w", err)
|
||||
}
|
||||
repoResponse := &apiclient.RepositoryResponse{IsSupported: isSupported}
|
||||
repoResponse := &apiclient.RepositoryResponse{IsSupported: isSupported, IsDiscoveryEnabled: isDiscoveryEnabled}
|
||||
|
||||
err = stream.SendAndClose(repoResponse)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending match repository response: %s", err)
|
||||
return fmt.Errorf("error sending match repository response: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) matchRepository(ctx context.Context, workdir string, envEntries []*apiclient.EnvEntry) (bool, error) {
|
||||
func (s *Service) matchRepository(ctx context.Context, workdir string, envEntries []*apiclient.EnvEntry) (isSupported bool, isDiscoveryEnabled bool, err error) {
|
||||
config := s.initConstants.PluginConfig
|
||||
|
||||
if config.Spec.Discover.FileName != "" {
|
||||
log.Debugf("config.Spec.Discover.FileName is provided")
|
||||
pattern := filepath.Join(workdir, config.Spec.Discover.FileName)
|
||||
matches, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
e := fmt.Errorf("error finding filename match for pattern %q: %s", pattern, err)
|
||||
e := fmt.Errorf("error finding filename match for pattern %q: %w", pattern, err)
|
||||
log.Debug(e)
|
||||
return false, e
|
||||
return false, true, e
|
||||
}
|
||||
return len(matches) > 0, nil
|
||||
return len(matches) > 0, true, nil
|
||||
}
|
||||
|
||||
if config.Spec.Discover.Find.Glob != "" {
|
||||
@@ -272,27 +308,86 @@ func (s *Service) matchRepository(ctx context.Context, workdir string, envEntrie
|
||||
// https://github.com/golang/go/issues/11862
|
||||
matches, err := zglob.Glob(pattern)
|
||||
if err != nil {
|
||||
e := fmt.Errorf("error finding glob match for pattern %q: %s", pattern, err)
|
||||
e := fmt.Errorf("error finding glob match for pattern %q: %w", pattern, err)
|
||||
log.Debug(e)
|
||||
return false, e
|
||||
return false, true, e
|
||||
}
|
||||
|
||||
if len(matches) > 0 {
|
||||
return true, nil
|
||||
return len(matches) > 0, true, nil
|
||||
}
|
||||
|
||||
if len(config.Spec.Discover.Find.Command.Command) > 0 {
|
||||
log.Debugf("Going to try runCommand.")
|
||||
env := append(os.Environ(), environ(envEntries)...)
|
||||
find, err := runCommand(ctx, config.Spec.Discover.Find.Command, workdir, env)
|
||||
if err != nil {
|
||||
return false, true, fmt.Errorf("error running find command: %w", err)
|
||||
}
|
||||
return false, nil
|
||||
return find != "", true, nil
|
||||
}
|
||||
|
||||
log.Debugf("Going to try runCommand.")
|
||||
env := append(os.Environ(), environ(envEntries)...)
|
||||
|
||||
find, err := runCommand(ctx, config.Spec.Discover.Find.Command, workdir, env)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error running find command: %s", err)
|
||||
}
|
||||
|
||||
if find != "" {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
return false, false, nil
|
||||
}
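To make the new three-valued result easier to read, here is a hedged sketch (not part of the change) of how a caller can interpret the (isSupported, isDiscoveryEnabled) pair returned above:

```go
package plugin

// discoveryOutcome is an illustrative helper showing how the pair returned by
// matchRepository reads:
//   - isDiscoveryEnabled == false: no filename, glob, or command is configured,
//     so applications must reference the plugin by name rather than rely on
//     auto-discovery.
//   - isDiscoveryEnabled == true, isSupported == false: discovery ran but found
//     no match in the repository.
//   - both true: discovery ran and matched, so the repository is supported.
func discoveryOutcome(isSupported, isDiscoveryEnabled bool) string {
	switch {
	case !isDiscoveryEnabled:
		return "discovery disabled: reference the plugin by explicit name"
	case isSupported:
		return "discovery matched: repository is supported by this plugin"
	default:
		return "discovery ran but did not match this repository"
	}
}
```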
|
||||
|
||||
// ParametersAnnouncementStream defines an interface able to send/receive a stream of parameter announcements.
|
||||
type ParametersAnnouncementStream interface {
|
||||
Stream
|
||||
SendAndClose(response *apiclient.ParametersAnnouncementResponse) error
|
||||
}
|
||||
|
||||
// GetParametersAnnouncement gets parameter announcements for a given Application and repo contents.
|
||||
func (s *Service) GetParametersAnnouncement(stream apiclient.ConfigManagementPluginService_GetParametersAnnouncementServer) error {
|
||||
bufferedCtx, cancel := buffered_context.WithEarlierDeadline(stream.Context(), cmpTimeoutBuffer)
|
||||
defer cancel()
|
||||
|
||||
workDir, cleanup, err := getTempDirMustCleanup(common.GetCMPWorkDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating workdir for generating parameter announcements: %w", err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
metadata, err := cmp.ReceiveRepoStream(bufferedCtx, stream, workDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parameters announcement error receiving stream: %w", err)
|
||||
}
|
||||
appPath := filepath.Clean(filepath.Join(workDir, metadata.AppRelPath))
|
||||
if !strings.HasPrefix(appPath, workDir) {
|
||||
return fmt.Errorf("illegal appPath: out of workDir bound")
|
||||
}
|
||||
|
||||
repoResponse, err := getParametersAnnouncement(bufferedCtx, appPath, s.initConstants.PluginConfig.Spec.Parameters.Static, s.initConstants.PluginConfig.Spec.Parameters.Dynamic)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get parameters announcement error: %w", err)
|
||||
}
|
||||
|
||||
err = stream.SendAndClose(repoResponse)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending parameters announcement response: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getParametersAnnouncement(ctx context.Context, appDir string, announcements []*repoclient.ParameterAnnouncement, command Command) (*apiclient.ParametersAnnouncementResponse, error) {
|
||||
augmentedAnnouncements := announcements
|
||||
|
||||
if len(command.Command) > 0 {
|
||||
stdout, err := runCommand(ctx, command, appDir, os.Environ())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error executing dynamic parameter output command: %w", err)
|
||||
}
|
||||
|
||||
var dynamicParamAnnouncements []*repoclient.ParameterAnnouncement
|
||||
err = json.Unmarshal([]byte(stdout), &dynamicParamAnnouncements)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling dynamic parameter output into ParametersAnnouncementResponse: %w", err)
|
||||
}
|
||||
|
||||
// dynamic announcements go first so that the static announcements, appended later, take precedence.
|
||||
augmentedAnnouncements = append(dynamicParamAnnouncements, announcements...)
|
||||
}
|
||||
|
||||
repoResponse := &apiclient.ParametersAnnouncementResponse{
|
||||
ParameterAnnouncements: augmentedAnnouncements,
|
||||
}
|
||||
return repoResponse, nil
|
||||
}
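A standalone sketch, with assumed inputs, of the contract the function above relies on: the dynamic command must print a JSON array of parameter announcements on stdout, and those are placed before the static entries so that the static ones, appended later, take precedence downstream:

```go
package main

import (
	"encoding/json"
	"fmt"

	repoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
)

func main() {
	// Hypothetical stdout produced by a "parameters.dynamic" command.
	stdout := `[{"name": "dynamic-a"}, {"name": "dynamic-b"}]`

	var dynamic []*repoclient.ParameterAnnouncement
	if err := json.Unmarshal([]byte(stdout), &dynamic); err != nil {
		panic(err)
	}

	// Static announcements as configured in plugin.yaml (illustrative value).
	static := []*repoclient.ParameterAnnouncement{{Name: "static-a"}}

	// Dynamic first, static appended after, mirroring the merge above.
	merged := append(dynamic, static...)
	for _, a := range merged {
		fmt.Println(a.Name) // dynamic-a, dynamic-b, static-a
	}
}
```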
|
||||
|
||||
@@ -3,6 +3,8 @@ option go_package = "github.com/argoproj/argo-cd/v2/cmpserver/apiclient";
|
||||
|
||||
package plugin;
|
||||
|
||||
import "github.com/argoproj/argo-cd/v2/reposerver/repository/repository.proto";
|
||||
|
||||
// AppStreamRequest is the request object used to send the application's
|
||||
// files over a stream.
|
||||
message AppStreamRequest {
|
||||
@@ -42,6 +44,13 @@ message ManifestResponse {
|
||||
|
||||
message RepositoryResponse {
|
||||
bool isSupported = 1;
|
||||
bool isDiscoveryEnabled = 2;
|
||||
}
|
||||
|
||||
// ParametersAnnouncementResponse contains a list of announcements. This list represents all the parameters which a CMP
|
||||
// is able to accept.
|
||||
message ParametersAnnouncementResponse {
|
||||
repeated repository.ParameterAnnouncement parameterAnnouncements = 1;
|
||||
}
|
||||
|
||||
message File {
|
||||
@@ -58,4 +67,8 @@ service ConfigManagementPluginService {
|
||||
// MatchRepository returns whether or not the given application is supported by the plugin
|
||||
rpc MatchRepository(stream AppStreamRequest) returns (RepositoryResponse) {
|
||||
}
|
||||
|
||||
// GetParametersAnnouncement gets a list of parameter announcements for the given app
|
||||
rpc GetParametersAnnouncement(stream AppStreamRequest) returns (ParametersAnnouncementResponse) {
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,17 +1,27 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"gopkg.in/yaml.v2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/argoproj/argo-cd/v2/cmpserver/apiclient"
|
||||
repoclient "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
|
||||
"github.com/argoproj/argo-cd/v2/test"
|
||||
"github.com/argoproj/argo-cd/v2/util/cmp"
|
||||
"github.com/argoproj/argo-cd/v2/util/tgzstream"
|
||||
)
|
||||
|
||||
func newService(configFilePath string) (*Service, error) {
|
||||
@@ -30,6 +40,11 @@ func newService(configFilePath string) (*Service, error) {
|
||||
return service, nil
|
||||
}
|
||||
|
||||
func (s *Service) WithGenerateCommand(command Command) *Service {
|
||||
s.initConstants.PluginConfig.Spec.Generate = command
|
||||
return s
|
||||
}
|
||||
|
||||
type pluginOpt func(*CMPServerInitConstants)
|
||||
|
||||
func withDiscover(d Discover) pluginOpt {
|
||||
@@ -84,11 +99,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by filename if file not found", func(t *testing.T) {
|
||||
// given
|
||||
@@ -98,11 +114,25 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match a pattern with a syntax error", func(t *testing.T) {
|
||||
// given
|
||||
d := Discover{
|
||||
FileName: "[",
|
||||
}
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
_, _, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.ErrorContains(t, err, "syntax error")
|
||||
})
|
||||
t.Run("will match plugin by glob", func(t *testing.T) {
|
||||
// given
|
||||
@@ -114,11 +144,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by glob if not found", func(t *testing.T) {
|
||||
// given
|
||||
@@ -130,11 +161,27 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will throw an error for a bad pattern", func(t *testing.T) {
|
||||
// given
|
||||
d := Discover{
|
||||
Find: Find{
|
||||
Glob: "does-not-exist",
|
||||
},
|
||||
}
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
_, _, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.ErrorContains(t, err, "error finding glob match for pattern")
|
||||
})
|
||||
t.Run("will match plugin by command when returns any output", func(t *testing.T) {
|
||||
// given
|
||||
@@ -148,11 +195,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by command when returns no output", func(t *testing.T) {
|
||||
// given
|
||||
@@ -166,11 +214,11 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will match plugin because env var defined", func(t *testing.T) {
|
||||
// given
|
||||
@@ -184,11 +232,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin because no env var defined", func(t *testing.T) {
|
||||
// given
|
||||
@@ -203,11 +252,12 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin by command when command fails", func(t *testing.T) {
|
||||
// given
|
||||
@@ -221,11 +271,25 @@ func TestMatchRepository(t *testing.T) {
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
assert.False(t, match)
|
||||
assert.True(t, discovery)
|
||||
})
|
||||
t.Run("will not match plugin as discovery is not set", func(t *testing.T) {
|
||||
// given
|
||||
d := Discover{}
|
||||
f := setup(t, withDiscover(d))
|
||||
|
||||
// when
|
||||
match, discovery, err := f.service.matchRepository(context.Background(), f.path, f.env)
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, match)
|
||||
assert.False(t, discovery)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -238,17 +302,49 @@ func Test_Negative_ConfigFile_DoesnotExist(t *testing.T) {
|
||||
|
||||
func TestGenerateManifest(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
|
||||
t.Run("successful generate", func(t *testing.T) {
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
res1, err := service.generateManifest(context.Background(), "testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res1)
|
||||
|
||||
expectedOutput := "{\"apiVersion\":\"v1\",\"data\":{\"foo\":\"bar\"},\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"my-map\"}}"
|
||||
if res1 != nil {
|
||||
require.Equal(t, expectedOutput, res1.Manifests[0])
|
||||
}
|
||||
})
|
||||
t.Run("bad generate command", func(t *testing.T) {
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
service.WithGenerateCommand(Command{Command: []string{"bad-command"}})
|
||||
|
||||
res, err := service.generateManifest(context.Background(), "testdata/kustomize", nil)
|
||||
assert.ErrorContains(t, err, "executable file not found")
|
||||
assert.Nil(t, res.Manifests)
|
||||
})
|
||||
t.Run("bad yaml output", func(t *testing.T) {
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
service.WithGenerateCommand(Command{Command: []string{"echo", "invalid yaml: }"}})
|
||||
|
||||
res, err := service.generateManifest(context.Background(), "testdata/kustomize", nil)
|
||||
assert.ErrorContains(t, err, "failed to unmarshal manifest")
|
||||
assert.Nil(t, res.Manifests)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGenerateManifest_deadline_exceeded(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
res1, err := service.generateManifest(context.Background(), "", nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res1)
|
||||
|
||||
expectedOutput := "{\"apiVersion\":\"v1\",\"data\":{\"foo\":\"bar\"},\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"my-map\"}}"
|
||||
if res1 != nil {
|
||||
require.Equal(t, expectedOutput, res1.Manifests[0])
|
||||
}
|
||||
expiredCtx, cancel := context.WithTimeout(context.Background(), time.Second*0)
|
||||
defer cancel()
|
||||
_, err = service.generateManifest(expiredCtx, "", nil)
|
||||
assert.ErrorContains(t, err, "context deadline exceeded")
|
||||
}
|
||||
|
||||
// TestRunCommandContextTimeout makes sure the command dies at timeout rather than sleeping past the timeout.
|
||||
@@ -266,3 +362,388 @@ func TestRunCommandContextTimeout(t *testing.T) {
|
||||
assert.Error(t, err) // The command should time out, causing an error.
|
||||
assert.Less(t, after.Sub(before), 1*time.Second)
|
||||
}
|
||||
|
||||
func TestRunCommandEmptyCommand(t *testing.T) {
|
||||
_, err := runCommand(context.Background(), Command{}, "", nil)
|
||||
assert.ErrorContains(t, err, "Command is empty")
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_empty_command(t *testing.T) {
|
||||
staticYAML := `
|
||||
- name: static-a
|
||||
- name: static-b
|
||||
`
|
||||
static := &[]*repoclient.ParameterAnnouncement{}
|
||||
err := yaml.Unmarshal([]byte(staticYAML), static)
|
||||
require.NoError(t, err)
|
||||
command := Command{
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[]`},
|
||||
}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []*repoclient.ParameterAnnouncement{{Name: "static-a"}, {Name: "static-b"}}, res.ParameterAnnouncements)
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_no_command(t *testing.T) {
|
||||
staticYAML := `
|
||||
- name: static-a
|
||||
- name: static-b
|
||||
`
|
||||
static := &[]*repoclient.ParameterAnnouncement{}
|
||||
err := yaml.Unmarshal([]byte(staticYAML), static)
|
||||
require.NoError(t, err)
|
||||
command := Command{}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []*repoclient.ParameterAnnouncement{{Name: "static-a"}, {Name: "static-b"}}, res.ParameterAnnouncements)
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_static_and_dynamic(t *testing.T) {
|
||||
staticYAML := `
|
||||
- name: static-a
|
||||
- name: static-b
|
||||
`
|
||||
static := &[]*repoclient.ParameterAnnouncement{}
|
||||
err := yaml.Unmarshal([]byte(staticYAML), static)
|
||||
require.NoError(t, err)
|
||||
command := Command{
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[{"name": "dynamic-a"}, {"name": "dynamic-b"}]`},
|
||||
}
|
||||
res, err := getParametersAnnouncement(context.Background(), "", *static, command)
|
||||
require.NoError(t, err)
|
||||
expected := []*repoclient.ParameterAnnouncement{
|
||||
{Name: "dynamic-a"},
|
||||
{Name: "dynamic-b"},
|
||||
{Name: "static-a"},
|
||||
{Name: "static-b"},
|
||||
}
|
||||
assert.Equal(t, expected, res.ParameterAnnouncements)
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_invalid_json(t *testing.T) {
|
||||
command := Command{
|
||||
Command: []string{"echo"},
|
||||
Args: []string{`[`},
|
||||
}
|
||||
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unexpected end of JSON input")
|
||||
}
|
||||
|
||||
func Test_getParametersAnnouncement_bad_command(t *testing.T) {
|
||||
command := Command{
|
||||
Command: []string{"exit"},
|
||||
Args: []string{"1"},
|
||||
}
|
||||
_, err := getParametersAnnouncement(context.Background(), "", []*repoclient.ParameterAnnouncement{}, command)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "error executing dynamic parameter output command")
|
||||
}
|
||||
|
||||
func Test_getTempDirMustCleanup(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
|
||||
// Induce a directory create error to verify error handling.
|
||||
err := os.Chmod(tempDir, 0000)
|
||||
require.NoError(t, err)
|
||||
_, _, err = getTempDirMustCleanup(path.Join(tempDir, "test"))
|
||||
assert.ErrorContains(t, err, "error creating temp dir")
|
||||
|
||||
err = os.Chmod(tempDir, 0700)
|
||||
require.NoError(t, err)
|
||||
workDir, cleanup, err := getTempDirMustCleanup(tempDir)
|
||||
require.NoError(t, err)
|
||||
require.DirExists(t, workDir)
|
||||
cleanup()
|
||||
assert.NoDirExists(t, workDir)
|
||||
}
|
||||
|
||||
func TestService_Init(t *testing.T) {
|
||||
// Set up a base directory containing a test directory and a test file.
|
||||
tempDir := t.TempDir()
|
||||
workDir := path.Join(tempDir, "workDir")
|
||||
err := os.MkdirAll(workDir, 0700)
|
||||
require.NoError(t, err)
|
||||
testfile := path.Join(workDir, "testfile")
|
||||
file, err := os.Create(testfile)
|
||||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make the base directory read-only so Init's cleanup fails.
|
||||
err = os.Chmod(tempDir, 0000)
|
||||
require.NoError(t, err)
|
||||
s := NewService(CMPServerInitConstants{PluginConfig: PluginConfig{}})
|
||||
err = s.Init(workDir)
|
||||
assert.ErrorContains(t, err, "error removing workdir", "Init must throw an error if it can't remove the work directory")
|
||||
|
||||
// Make the base directory writable so Init's cleanup succeeds.
|
||||
err = os.Chmod(tempDir, 0700)
|
||||
require.NoError(t, err)
|
||||
err = s.Init(workDir)
|
||||
assert.NoError(t, err)
|
||||
assert.DirExists(t, workDir)
|
||||
assert.NoFileExists(t, testfile)
|
||||
}
|
||||
|
||||
func TestEnviron(t *testing.T) {
|
||||
t.Run("empty environ", func(t *testing.T) {
|
||||
env := environ([]*apiclient.EnvEntry{})
|
||||
assert.Nil(t, env)
|
||||
})
|
||||
t.Run("env vars with empty names or values", func(t *testing.T) {
|
||||
env := environ([]*apiclient.EnvEntry{
|
||||
{Value: "test"},
|
||||
{Name: "test"},
|
||||
})
|
||||
assert.Nil(t, env)
|
||||
})
|
||||
t.Run("proper env vars", func(t *testing.T) {
|
||||
env := environ([]*apiclient.EnvEntry{
|
||||
{Name: "name1", Value: "value1"},
|
||||
{Name: "name2", Value: "value2"},
|
||||
})
|
||||
assert.Equal(t, []string{"name1=value1", "name2=value2"}, env)
|
||||
})
|
||||
}
|
||||
|
||||
type MockGenerateManifestStream struct {
|
||||
metadataSent bool
|
||||
fileSent bool
|
||||
metadataRequest *apiclient.AppStreamRequest
|
||||
fileRequest *apiclient.AppStreamRequest
|
||||
response *apiclient.ManifestResponse
|
||||
}
|
||||
|
||||
func NewMockGenerateManifestStream(repoPath, appPath string, env []string) (*MockGenerateManifestStream, error) {
|
||||
tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, appPath, env, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tgzstream.CloseAndDelete(tgz)
|
||||
|
||||
tgzBuffer := bytes.NewBuffer(nil)
|
||||
_, err = io.Copy(tgzBuffer, tgz)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err)
|
||||
}
|
||||
|
||||
return &MockGenerateManifestStream{
|
||||
metadataRequest: mr,
|
||||
fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockGenerateManifestStream) SendAndClose(response *apiclient.ManifestResponse) error {
|
||||
m.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockGenerateManifestStream) Recv() (*apiclient.AppStreamRequest, error) {
|
||||
if !m.metadataSent {
|
||||
m.metadataSent = true
|
||||
return m.metadataRequest, nil
|
||||
}
|
||||
|
||||
if !m.fileSent {
|
||||
m.fileSent = true
|
||||
return m.fileRequest, nil
|
||||
}
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
func (m *MockGenerateManifestStream) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func TestService_GenerateManifest(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("successful generate", func(t *testing.T) {
|
||||
s, err := NewMockGenerateManifestStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
err = service.generateManifestGeneric(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
assert.Equal(t, []string{"{\"apiVersion\":\"v1\",\"data\":{\"foo\":\"bar\"},\"kind\":\"ConfigMap\",\"metadata\":{\"name\":\"my-map\"}}"}, s.response.Manifests)
|
||||
})
|
||||
|
||||
t.Run("out-of-bounds app path", func(t *testing.T) {
|
||||
s, err := NewMockGenerateManifestStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
// set a malicious app path on the metadata
|
||||
s.metadataRequest.Request.(*apiclient.AppStreamRequest_Metadata).Metadata.AppRelPath = "../out-of-bounds"
|
||||
err = service.generateManifestGeneric(s)
|
||||
require.ErrorContains(t, err, "illegal appPath")
|
||||
assert.Nil(t, s.response)
|
||||
})
|
||||
}
|
||||
|
||||
type MockMatchRepositoryStream struct {
|
||||
metadataSent bool
|
||||
fileSent bool
|
||||
metadataRequest *apiclient.AppStreamRequest
|
||||
fileRequest *apiclient.AppStreamRequest
|
||||
response *apiclient.RepositoryResponse
|
||||
}
|
||||
|
||||
func NewMockMatchRepositoryStream(repoPath, appPath string, env []string) (*MockMatchRepositoryStream, error) {
|
||||
tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, appPath, env, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tgzstream.CloseAndDelete(tgz)
|
||||
|
||||
tgzBuffer := bytes.NewBuffer(nil)
|
||||
_, err = io.Copy(tgzBuffer, tgz)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err)
|
||||
}
|
||||
|
||||
return &MockMatchRepositoryStream{
|
||||
metadataRequest: mr,
|
||||
fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockMatchRepositoryStream) SendAndClose(response *apiclient.RepositoryResponse) error {
|
||||
m.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockMatchRepositoryStream) Recv() (*apiclient.AppStreamRequest, error) {
|
||||
if !m.metadataSent {
|
||||
m.metadataSent = true
|
||||
return m.metadataRequest, nil
|
||||
}
|
||||
|
||||
if !m.fileSent {
|
||||
m.fileSent = true
|
||||
return m.fileRequest, nil
|
||||
}
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
func (m *MockMatchRepositoryStream) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func TestService_MatchRepository(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("supported app", func(t *testing.T) {
|
||||
s, err := NewMockMatchRepositoryStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
err = service.matchRepositoryGeneric(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
assert.True(t, s.response.IsSupported)
|
||||
})
|
||||
|
||||
t.Run("unsupported app", func(t *testing.T) {
|
||||
s, err := NewMockMatchRepositoryStream("./testdata/ksonnet", "./testdata/ksonnet", nil)
|
||||
require.NoError(t, err)
|
||||
err = service.matchRepositoryGeneric(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
assert.False(t, s.response.IsSupported)
|
||||
})
|
||||
}
|
||||
|
||||
type MockParametersAnnouncementStream struct {
|
||||
metadataSent bool
|
||||
fileSent bool
|
||||
metadataRequest *apiclient.AppStreamRequest
|
||||
fileRequest *apiclient.AppStreamRequest
|
||||
response *apiclient.ParametersAnnouncementResponse
|
||||
}
|
||||
|
||||
func NewMockParametersAnnouncementStream(repoPath, appPath string, env []string) (*MockParametersAnnouncementStream, error) {
|
||||
tgz, mr, err := cmp.GetCompressedRepoAndMetadata(repoPath, appPath, env, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tgzstream.CloseAndDelete(tgz)
|
||||
|
||||
tgzBuffer := bytes.NewBuffer(nil)
|
||||
_, err = io.Copy(tgzBuffer, tgz)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to copy manifest targz to a byte buffer: %w", err)
|
||||
}
|
||||
|
||||
return &MockParametersAnnouncementStream{
|
||||
metadataRequest: mr,
|
||||
fileRequest: cmp.AppFileRequest(tgzBuffer.Bytes()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SendAndClose(response *apiclient.ParametersAnnouncementResponse) error {
|
||||
m.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) Recv() (*apiclient.AppStreamRequest, error) {
|
||||
if !m.metadataSent {
|
||||
m.metadataSent = true
|
||||
return m.metadataRequest, nil
|
||||
}
|
||||
|
||||
if !m.fileSent {
|
||||
m.fileSent = true
|
||||
return m.fileRequest, nil
|
||||
}
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SetHeader(metadata.MD) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SendHeader(metadata.MD) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SetTrailer(metadata.MD) {}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) SendMsg(interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockParametersAnnouncementStream) RecvMsg(interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestService_GetParametersAnnouncement(t *testing.T) {
|
||||
configFilePath := "./testdata/kustomize/config"
|
||||
service, err := newService(configFilePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("successful response", func(t *testing.T) {
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
err = service.GetParametersAnnouncement(s)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, s.response)
|
||||
require.Len(t, s.response.ParameterAnnouncements, 1)
|
||||
assert.Equal(t, repoclient.ParameterAnnouncement{Name: "test-param", String_: "test-value"}, *s.response.ParameterAnnouncements[0])
|
||||
})
|
||||
t.Run("out of bounds app", func(t *testing.T) {
|
||||
s, err := NewMockParametersAnnouncementStream("./testdata/kustomize", "./testdata/kustomize", nil)
|
||||
require.NoError(t, err)
|
||||
// set a malicious app path on the metadata
|
||||
s.metadataRequest.Request.(*apiclient.AppStreamRequest_Metadata).Metadata.AppRelPath = "../out-of-bounds"
|
||||
err = service.GetParametersAnnouncement(s)
|
||||
require.ErrorContains(t, err, "illegal appPath")
|
||||
require.Nil(t, s.response)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -7,8 +7,12 @@ spec:
|
||||
init:
|
||||
command: [kustomize, version]
|
||||
generate:
|
||||
command: [sh, -c, "cd testdata/kustomize && kustomize build"]
|
||||
command: [sh, -c, "kustomize build"]
|
||||
discover:
|
||||
find:
|
||||
command: [sh, -c, find . -name kustomization.yaml]
|
||||
glob: "**/*/kustomization.yaml"
|
||||
glob: "**/kustomization.yaml"
|
||||
parameters:
|
||||
static:
|
||||
- name: test-param
|
||||
string: test-value
|
||||
|
||||
@@ -108,7 +108,7 @@ func (a *ArgoCDCMPServer) CreateGRPC() (*grpc.Server, error) {
|
||||
return true, nil
|
||||
}))
|
||||
pluginService := plugin.NewService(a.initConstants)
|
||||
err := pluginService.Init()
|
||||
err := pluginService.Init(common.GetCMPWorkDir())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing plugin service: %s", err)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@@ -316,3 +317,8 @@ const (
|
||||
SecurityMedium = 2 // Could indicate malicious events, but has a high likelihood of being user/system error (i.e. access denied)
|
||||
SecurityLow = 1 // Unexceptional entries (i.e. successful access logs)
|
||||
)
|
||||
|
||||
// Common error messages
|
||||
const TokenVerificationError = "failed to verify the token"
|
||||
|
||||
var TokenVerificationErr = errors.New(TokenVerificationError)
|
||||
|
||||
@@ -335,7 +335,7 @@ func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]b
|
||||
}
|
||||
|
||||
if !ctrl.canProcessApp(obj) {
|
||||
// Don't force refresh app if app belongs to a different controller shard
|
||||
// Don't force refresh app if app belongs to a different controller shard or is outside the allowed namespaces.
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -415,33 +415,32 @@ func isKnownOrphanedResourceExclusion(key kube.ResourceKey, proj *appv1.AppProje
|
||||
|
||||
func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managedResources []*appv1.ResourceDiff) (*appv1.ApplicationTree, error) {
|
||||
nodes := make([]appv1.ResourceNode, 0)
|
||||
|
||||
proj, err := ctrl.getAppProj(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get project: %w", err)
|
||||
}
|
||||
|
||||
orphanedNodesMap := make(map[kube.ResourceKey]appv1.ResourceNode)
|
||||
warnOrphaned := true
|
||||
if proj.Spec.OrphanedResources != nil {
|
||||
orphanedNodesMap, err = ctrl.stateCache.GetNamespaceTopLevelResources(a.Spec.Destination.Server, a.Spec.Destination.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get namespace top-level resources: %w", err)
|
||||
}
|
||||
warnOrphaned = proj.Spec.OrphanedResources.IsWarn()
|
||||
}
|
||||
|
||||
for i := range managedResources {
|
||||
managedResource := managedResources[i]
|
||||
delete(orphanedNodesMap, kube.NewResourceKey(managedResource.Group, managedResource.Kind, managedResource.Namespace, managedResource.Name))
|
||||
var live = &unstructured.Unstructured{}
|
||||
err := json.Unmarshal([]byte(managedResource.LiveState), &live)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to unmarshal live state of managed resources: %w", err)
|
||||
}
|
||||
var target = &unstructured.Unstructured{}
|
||||
err = json.Unmarshal([]byte(managedResource.TargetState), &target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to unmarshal target state of managed resources: %w", err)
|
||||
}
|
||||
|
||||
if live == nil {
|
||||
@@ -457,7 +456,11 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
} else {
|
||||
err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) bool {
|
||||
permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, a.Spec.Destination, func(project string) ([]*appv1.Cluster, error) {
|
||||
return ctrl.db.GetProjectClusters(context.TODO(), project)
|
||||
clusters, err := ctrl.db.GetProjectClusters(context.TODO(), project)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get project clusters: %w", err)
|
||||
}
|
||||
return clusters, nil
|
||||
})
|
||||
if !permitted {
|
||||
return false
|
||||
@@ -466,7 +469,7 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to iterate resource hierarchy: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -515,7 +518,7 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
|
||||
|
||||
hosts, err := ctrl.getAppHosts(a, nodes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get app hosts: %w", err)
|
||||
}
|
||||
return &appv1.ApplicationTree{Nodes: nodes, OrphanedNodes: orphanedNodes, Hosts: hosts}, nil
|
||||
}
|
||||
@@ -751,6 +754,7 @@ func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int
|
||||
// needs to be the qualified name of the application, i.e. <namespace>/<name>.
|
||||
func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith *CompareWith, after *time.Duration) {
|
||||
key := ctrl.toAppKey(appName)
|
||||
|
||||
if compareWith != nil && after != nil {
|
||||
ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, compareWith), *after)
|
||||
} else {
|
||||
@@ -1313,7 +1317,6 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
}
|
||||
ctrl.appRefreshQueue.Done(appKey)
|
||||
}()
|
||||
|
||||
obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
|
||||
@@ -1334,9 +1337,9 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
if !needRefresh {
|
||||
return
|
||||
}
|
||||
|
||||
app := origApp.DeepCopy()
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
reconcileDuration := time.Since(startTime)
|
||||
@@ -1357,7 +1360,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
} else {
|
||||
var tree *appv1.ApplicationTree
|
||||
if tree, err = ctrl.getResourceTree(app, managedResources); err == nil {
|
||||
app.Status.Summary = tree.GetSummary()
|
||||
app.Status.Summary = tree.GetSummary(app)
|
||||
if err := ctrl.cache.SetAppResourcesTree(app.InstanceName(ctrl.namespace), tree); err != nil {
|
||||
logCtx.Errorf("Failed to cache resources tree: %v", err)
|
||||
return
|
||||
@@ -1389,15 +1392,38 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
localManifests = opState.Operation.Sync.Manifests
|
||||
}
|
||||
|
||||
revision := app.Spec.Source.TargetRevision
|
||||
if comparisonLevel == CompareWithRecent {
|
||||
revision = app.Status.Sync.Revision
|
||||
}
|
||||
revisions := make([]string, 0)
|
||||
sources := make([]appv1.ApplicationSource, 0)
|
||||
|
||||
hasMultipleSources := app.Spec.HasMultipleSources()
|
||||
|
||||
// If we have multiple sources, we use all the sources under `sources` field and ignore source under `source` field.
|
||||
// else we use the source under the source field.
|
||||
if hasMultipleSources {
|
||||
for _, source := range app.Spec.Sources {
|
||||
// We do not perform any filtering of duplicate sources.
|
||||
// Argo CD will apply and update the resources generated from the sources automatically
|
||||
// based on the order in which manifests were generated
|
||||
sources = append(sources, source)
|
||||
revisions = append(revisions, source.TargetRevision)
|
||||
}
|
||||
if comparisonLevel == CompareWithRecent {
|
||||
revisions = app.Status.Sync.Revisions
|
||||
}
|
||||
} else {
|
||||
revision := app.Spec.GetSource().TargetRevision
|
||||
if comparisonLevel == CompareWithRecent {
|
||||
revision = app.Status.Sync.Revision
|
||||
}
|
||||
revisions = append(revisions, revision)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
}
|
||||
now := metav1.Now()
|
||||
compareResult := ctrl.appStateManager.CompareAppState(app, project, revision, app.Spec.Source,
|
||||
|
||||
compareResult := ctrl.appStateManager.CompareAppState(app, project, revisions, sources,
|
||||
refreshType == appv1.RefreshTypeHard,
|
||||
comparisonLevel == CompareWithLatestForceResolve, localManifests)
|
||||
comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources)
|
||||
|
||||
for k, v := range compareResult.timings {
|
||||
logCtx = logCtx.WithField(k, v.Milliseconds())
|
||||
}
|
||||
@@ -1408,7 +1434,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
if err != nil {
|
||||
logCtx.Errorf("Failed to cache app resources: %v", err)
|
||||
} else {
|
||||
app.Status.Summary = tree.GetSummary()
|
||||
app.Status.Summary = tree.GetSummary(app)
|
||||
}
|
||||
|
||||
if project.Spec.SyncWindows.Matches(app).CanSync(false) {
|
||||
@@ -1438,6 +1464,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
|
||||
return resourceStatusKey(app.Status.Resources[i]) < resourceStatusKey(app.Status.Resources[j])
|
||||
})
|
||||
app.Status.SourceType = compareResult.appSourceType
|
||||
app.Status.SourceTypes = compareResult.appSourceTypes
|
||||
ctrl.persistAppStatus(origApp, &app.Status)
|
||||
return
|
||||
}
|
||||
@@ -1463,27 +1490,34 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
|
||||
// user requested app refresh.
|
||||
refreshType = requestedType
|
||||
reason = fmt.Sprintf("%s refresh requested", refreshType)
|
||||
} else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) {
|
||||
reason = "spec.source differs"
|
||||
compareWith = CompareWithLatestForceResolve
|
||||
} else if hardExpired || softExpired {
|
||||
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
|
||||
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
|
||||
//TODO: find existing Golang bug or create a new one
|
||||
reconciledAtStr := "never"
|
||||
if app.Status.ReconciledAt != nil {
|
||||
reconciledAtStr = app.Status.ReconciledAt.String()
|
||||
} else {
|
||||
if app.Spec.HasMultipleSources() {
|
||||
if (len(app.Spec.Sources) != len(app.Status.Sync.ComparedTo.Sources)) || !reflect.DeepEqual(app.Spec.Sources, app.Status.Sync.ComparedTo.Sources) {
|
||||
reason = "atleast one of the spec.sources differs"
|
||||
compareWith = CompareWithLatestForceResolve
|
||||
}
|
||||
} else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) {
|
||||
reason = "spec.source differs"
|
||||
compareWith = CompareWithLatestForceResolve
|
||||
} else if hardExpired || softExpired {
|
||||
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
|
||||
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
|
||||
//TODO: find existing Golang bug or create a new one
|
||||
reconciledAtStr := "never"
|
||||
if app.Status.ReconciledAt != nil {
|
||||
reconciledAtStr = app.Status.ReconciledAt.String()
|
||||
}
|
||||
reason = fmt.Sprintf("comparison expired, requesting refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusRefreshTimeout)
|
||||
if hardExpired {
|
||||
reason = fmt.Sprintf("comparison expired, requesting hard refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusHardRefreshTimeout)
|
||||
refreshType = appv1.RefreshTypeHard
|
||||
}
|
||||
} else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) {
|
||||
reason = "spec.destination differs"
|
||||
} else if requested, level := ctrl.isRefreshRequested(app.QualifiedName()); requested {
|
||||
compareWith = level
|
||||
reason = "controller refresh requested"
|
||||
}
|
||||
reason = fmt.Sprintf("comparison expired, requesting refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusRefreshTimeout)
|
||||
if hardExpired {
|
||||
reason = fmt.Sprintf("comparison expired, requesting hard refresh. reconciledAt: %v, expiry: %v", reconciledAtStr, statusHardRefreshTimeout)
|
||||
refreshType = appv1.RefreshTypeHard
|
||||
}
|
||||
} else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) {
|
||||
reason = "spec.destination differs"
|
||||
} else if requested, level := ctrl.isRefreshRequested(app.QualifiedName()); requested {
|
||||
compareWith = level
|
||||
reason = "controller refresh requested"
|
||||
}
|
||||
|
||||
if reason != "" {
|
||||
@@ -1497,17 +1531,7 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
|
||||
errorConditions := make([]appv1.ApplicationCondition, 0)
|
||||
proj, err := ctrl.getAppProj(app)
|
||||
if err != nil {
|
||||
if apierr.IsNotFound(err) {
|
||||
errorConditions = append(errorConditions, appv1.ApplicationCondition{
|
||||
Type: appv1.ApplicationConditionInvalidSpecError,
|
||||
Message: fmt.Sprintf("Application referencing project %s which does not exist", app.Spec.Project),
|
||||
})
|
||||
} else {
|
||||
errorConditions = append(errorConditions, appv1.ApplicationCondition{
|
||||
Type: appv1.ApplicationConditionUnknownError,
|
||||
Message: err.Error(),
|
||||
})
|
||||
}
|
||||
errorConditions = append(errorConditions, ctrl.projectErrorToCondition(err, app))
|
||||
} else {
|
||||
specConditions, err := argo.ValidatePermissions(context.Background(), &app.Spec, proj, ctrl.db)
|
||||
if err != nil {
|
||||
@@ -1530,7 +1554,9 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
|
||||
func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Application) {
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
app.Spec = *argo.NormalizeApplicationSpec(&app.Spec)
|
||||
|
||||
patch, modified, err := diff.CreateTwoWayMergePatch(orig, app, appv1.Application{})
|
||||
|
||||
if err != nil {
|
||||
logCtx.Errorf("error constructing app spec patch: %v", err)
|
||||
} else if modified {
|
||||
@@ -1574,7 +1600,6 @@ func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, new
|
||||
logCtx.Infof("No status changes. Skipping patch")
|
||||
return
|
||||
}
|
||||
logCtx.Debugf("patch: %s", string(patch))
|
||||
appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace)
|
||||
_, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
@@ -1590,6 +1615,7 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
return nil
|
||||
}
|
||||
logCtx := log.WithFields(log.Fields{"application": app.QualifiedName()})
|
||||
|
||||
if app.Operation != nil {
|
||||
logCtx.Infof("Skipping auto-sync: another operation is in progress")
|
||||
return nil
|
||||
@@ -1621,13 +1647,15 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
}
|
||||
|
||||
desiredCommitSHA := syncStatus.Revision
|
||||
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA)
|
||||
desiredCommitSHAsMS := syncStatus.Revisions
|
||||
alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources())
|
||||
selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
|
||||
op := appv1.Operation{
|
||||
Sync: &appv1.SyncOperation{
|
||||
Revision: desiredCommitSHA,
|
||||
Prune: app.Spec.SyncPolicy.Automated.Prune,
|
||||
SyncOptions: app.Spec.SyncPolicy.SyncOptions,
|
||||
Revisions: desiredCommitSHAsMS,
|
||||
},
|
||||
InitiatedBy: appv1.OperationInitiator{Automated: true},
|
||||
Retry: appv1.RetryStrategy{Limit: 5},
|
||||
@@ -1679,7 +1707,6 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
|
||||
return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
|
||||
}
|
||||
}
|
||||
|
||||
appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
|
||||
_, err := argo.SetAppOperation(appIf, app.Name, &op)
|
||||
if err != nil {
|
||||
@@ -1694,20 +1721,41 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *

// alreadyAttemptedSync returns whether or not the most recent sync was performed against the
// commitSHA and with the same app source config which are currently set in the app
func alreadyAttemptedSync(app *appv1.Application, commitSHA string) (bool, synccommon.OperationPhase) {
func alreadyAttemptedSync(app *appv1.Application, commitSHA string, commitSHAsMS []string, hasMultipleSources bool) (bool, synccommon.OperationPhase) {
if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
return false, ""
}
if app.Status.OperationState.SyncResult.Revision != commitSHA {
return false, ""
if hasMultipleSources {
if !reflect.DeepEqual(app.Status.OperationState.SyncResult.Revisions, commitSHAsMS) {
return false, ""
}
} else {
if app.Status.OperationState.SyncResult.Revision != commitSHA {
return false, ""
}
}

if hasMultipleSources {
// Ignore differences in target revision, since we already just verified commitSHAs are equal,
// and we do not want to trigger auto-sync due to things like HEAD != master
specSources := app.Spec.Sources.DeepCopy()
syncSources := app.Status.OperationState.SyncResult.Sources.DeepCopy()
for _, source := range specSources {
source.TargetRevision = ""
}
for _, source := range syncSources {
source.TargetRevision = ""
}
return reflect.DeepEqual(app.Spec.Sources, app.Status.OperationState.SyncResult.Sources), app.Status.OperationState.Phase
} else {
// Ignore differences in target revision, since we already just verified commitSHAs are equal,
// and we do not want to trigger auto-sync due to things like HEAD != master
specSource := app.Spec.Source.DeepCopy()
specSource.TargetRevision = ""
syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy()
syncResSource.TargetRevision = ""
return reflect.DeepEqual(app.Spec.GetSource(), app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase
}
// Ignore differences in target revision, since we already just verified commitSHAs are equal,
// and we do not want to trigger auto-sync due to things like HEAD != master
specSource := app.Spec.Source.DeepCopy()
specSource.TargetRevision = ""
syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy()
syncResSource.TargetRevision = ""
return reflect.DeepEqual(app.Spec.Source, app.Status.OperationState.SyncResult.Source), app.Status.OperationState.Phase
}

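A rough sketch of how autoSync consumes the widened signature (the surrounding values are hypothetical; only the call shape is taken from the hunk above):

    // desiredCommitSHA / desiredCommitSHAsMS come from syncStatus.Revision / syncStatus.Revisions
    attempted, phase := alreadyAttemptedSync(app, desiredCommitSHA, desiredCommitSHAsMS, app.Spec.HasMultipleSources())
    if attempted && phase.Successful() {
        // nothing new to sync: the same revisions were already synced with an identical source config
    }
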
func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool, time.Duration) {
|
||||
@@ -1729,6 +1777,13 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Only process given app if it exists in a watched namespace, or in the
|
||||
// control plane's namespace.
|
||||
if app.Namespace != ctrl.namespace && !glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) {
|
||||
return false
|
||||
}
|
||||
|
||||
if ctrl.clusterFilter != nil {
|
||||
cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
|
||||
if err != nil {
|
||||
@@ -1737,12 +1792,6 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
|
||||
return ctrl.clusterFilter(cluster)
|
||||
}
|
||||
|
||||
// Only process given app if it exists in a watched namespace, or in the
|
||||
// control plane's namespace.
|
||||
if app.Namespace != ctrl.namespace && !glob.MatchStringInList(ctrl.applicationNamespaces, app.Namespace, false) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -1798,7 +1847,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
|
||||
// If the application is not allowed to use the project,
|
||||
// log an error.
|
||||
if _, err := ctrl.getAppProj(app); err != nil {
|
||||
ctrl.setAppCondition(app, appv1.ApplicationCondition{Type: appv1.ApplicationConditionUnknownError, Message: err.Error()})
|
||||
ctrl.setAppCondition(app, ctrl.projectErrorToCondition(err, app))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1869,6 +1918,19 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
return informer, lister
}

func (ctrl *ApplicationController) projectErrorToCondition(err error, app *appv1.Application) appv1.ApplicationCondition {
var condition appv1.ApplicationCondition
if apierr.IsNotFound(err) {
condition = appv1.ApplicationCondition{
Type: appv1.ApplicationConditionInvalidSpecError,
Message: fmt.Sprintf("Application referencing project %s which does not exist", app.Spec.Project),
}
} else {
condition = appv1.ApplicationCondition{Type: appv1.ApplicationConditionUnknownError, Message: err.Error()}
}
return condition
}

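To illustrate the mapping implemented by projectErrorToCondition (the errors below are fabricated for the example; apierr is k8s.io/apimachinery/pkg/api/errors):

    notFound := apierr.NewNotFound(schema.GroupResource{Group: "argoproj.io", Resource: "appprojects"}, "my-project")
    cond := ctrl.projectErrorToCondition(notFound, app)
    // cond.Type == appv1.ApplicationConditionInvalidSpecError

    cond = ctrl.projectErrorToCondition(errors.New("connection refused"), app)
    // cond.Type == appv1.ApplicationConditionUnknownError
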
func (ctrl *ApplicationController) RegisterClusterSecretUpdater(ctx context.Context) {
|
||||
updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(""), ctrl.cache, ctrl.clusterFilter, ctrl.getAppProj, ctrl.namespace)
|
||||
go updater.Run(ctx)
|
||||
|
||||
@@ -865,7 +865,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
app.Status.Sync = argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Source: app.Spec.Source,
|
||||
Source: app.Spec.GetSource(),
|
||||
Destination: app.Spec.Destination,
|
||||
},
|
||||
}
|
||||
@@ -909,7 +909,7 @@ func TestNeedRefreshAppStatus(t *testing.T) {
|
||||
app.Status.Sync = argoappv1.SyncStatus{
|
||||
Status: argoappv1.SyncStatusCodeSynced,
|
||||
ComparedTo: argoappv1.ComparedTo{
|
||||
Source: app.Spec.Source,
|
||||
Source: app.Spec.GetSource(),
|
||||
Destination: app.Spec.Destination,
|
||||
},
|
||||
}
|
||||
@@ -1012,7 +1012,7 @@ func TestUpdateReconciledAt(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
reconciledAt := metav1.NewTime(time.Now().Add(-1 * time.Second))
|
||||
app.Status = argoappv1.ApplicationStatus{ReconciledAt: &reconciledAt}
|
||||
app.Status.Sync = argoappv1.SyncStatus{ComparedTo: argoappv1.ComparedTo{Source: app.Spec.Source, Destination: app.Spec.Destination}}
|
||||
app.Status.Sync = argoappv1.SyncStatus{ComparedTo: argoappv1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination}}
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
@@ -1068,6 +1068,34 @@ func TestUpdateReconciledAt(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestProjectErrorToCondition(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
app.Spec.Project = "wrong project"
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
Manifests: []string{},
|
||||
Namespace: test.FakeDestNamespace,
|
||||
Server: test.FakeClusterURL,
|
||||
Revision: "abc123",
|
||||
},
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
})
|
||||
key, _ := cache.MetaNamespaceKeyFunc(app)
|
||||
ctrl.appRefreshQueue.Add(key)
|
||||
ctrl.requestAppRefresh(app.Name, CompareWithRecent.Pointer(), nil)
|
||||
|
||||
ctrl.processAppRefreshQueueItem()
|
||||
|
||||
obj, ok, err := ctrl.appInformer.GetIndexer().GetByKey(key)
|
||||
assert.True(t, ok)
|
||||
assert.NoError(t, err)
|
||||
updatedApp := obj.(*argoappv1.Application)
|
||||
assert.Equal(t, argoappv1.ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[0].Type)
|
||||
assert.Equal(t, "Application referencing project wrong project which does not exist", updatedApp.Status.Conditions[0].Message)
|
||||
assert.Equal(t, argoappv1.ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[0].Type)
|
||||
}
|
||||
|
||||
func TestFinalizeProjectDeletion_HasApplications(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
proj := &argoappv1.AppProject{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: test.FakeArgoCDNamespace}}
|
||||
@@ -1345,3 +1373,31 @@ func TestToAppKey(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_canProcessApp(t *testing.T) {
|
||||
app := newFakeApp()
|
||||
ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})
|
||||
ctrl.applicationNamespaces = []string{"good"}
|
||||
t.Run("without cluster filter, good namespace", func(t *testing.T) {
|
||||
app.Namespace = "good"
|
||||
canProcess := ctrl.canProcessApp(app)
|
||||
assert.True(t, canProcess)
|
||||
})
|
||||
t.Run("without cluster filter, bad namespace", func(t *testing.T) {
|
||||
app.Namespace = "bad"
|
||||
canProcess := ctrl.canProcessApp(app)
|
||||
assert.False(t, canProcess)
|
||||
})
|
||||
t.Run("with cluster filter, good namespace", func(t *testing.T) {
|
||||
app.Namespace = "good"
|
||||
ctrl.clusterFilter = func(_ *argoappv1.Cluster) bool { return true }
|
||||
canProcess := ctrl.canProcessApp(app)
|
||||
assert.True(t, canProcess)
|
||||
})
|
||||
t.Run("with cluster filter, bad namespace", func(t *testing.T) {
|
||||
app.Namespace = "bad"
|
||||
ctrl.clusterFilter = func(_ *argoappv1.Cluster) bool { return true }
|
||||
canProcess := ctrl.canProcessApp(app)
|
||||
assert.False(t, canProcess)
|
||||
})
|
||||
}
|
||||

controller/cache/cache.go (vendored)
@@ -220,10 +220,10 @@ func asResourceNode(r *clustercache.Resource) appv1.ResourceNode {
|
||||
gv = schema.GroupVersion{}
|
||||
}
|
||||
parentRefs := make([]appv1.ResourceRef, len(r.OwnerRefs))
|
||||
for _, ownerRef := range r.OwnerRefs {
|
||||
for i, ownerRef := range r.OwnerRefs {
|
||||
ownerGvk := schema.FromAPIVersionAndKind(ownerRef.APIVersion, ownerRef.Kind)
|
||||
ownerKey := kube.NewResourceKey(ownerGvk.Group, ownerRef.Kind, r.Ref.Namespace, ownerRef.Name)
|
||||
parentRefs[0] = appv1.ResourceRef{Name: ownerRef.Name, Kind: ownerKey.Kind, Namespace: r.Ref.Namespace, Group: ownerKey.Group, UID: string(ownerRef.UID)}
|
||||
parentRefs[i] = appv1.ResourceRef{Name: ownerRef.Name, Kind: ownerKey.Kind, Namespace: r.Ref.Namespace, Group: ownerKey.Group, UID: string(ownerRef.UID)}
|
||||
}
|
||||
var resHealth *appv1.HealthStatus
|
||||
resourceInfo := resInfo(r)
|
||||
@@ -389,6 +389,11 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
|
||||
return nil, fmt.Errorf("controller is configured to ignore cluster %s", cluster.Server)
|
||||
}
|
||||
|
||||
resourceCustomLabels, err := c.settingsMgr.GetResourceCustomLabels()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting custom label: %w", err)
|
||||
}
|
||||
|
||||
clusterCacheOpts := []clustercache.UpdateSettingsFunc{
|
||||
clustercache.SetListSemaphore(semaphore.NewWeighted(clusterCacheListSemaphoreSize)),
|
||||
clustercache.SetListPageSize(clusterCacheListPageSize),
|
||||
@@ -400,7 +405,7 @@ func (c *liveStateCache) getCluster(server string) (clustercache.ClusterCache, e
|
||||
clustercache.SetClusterResources(cluster.ClusterResources),
|
||||
clustercache.SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (interface{}, bool) {
|
||||
res := &ResourceInfo{}
|
||||
populateNodeInfo(un, res)
|
||||
populateNodeInfo(un, res, resourceCustomLabels)
|
||||
c.lock.RLock()
|
||||
cacheSettings := c.cacheSettings
|
||||
c.lock.RUnlock()
|

controller/cache/cache_test.go (vendored)
@@ -6,10 +6,11 @@ import (
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/api/core/v1"
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/cache"
|
||||
"github.com/argoproj/gitops-engine/pkg/cache/mocks"
|
||||
@@ -153,3 +154,51 @@ func TestIsRetryableError(t *testing.T) {
|
||||
assert.True(t, isRetryableError(connectionReset))
|
||||
})
|
||||
}
|
||||
|
||||
func Test_asResourceNode_owner_refs(t *testing.T) {
|
||||
resNode := asResourceNode(&cache.Resource{
|
||||
ResourceVersion: "",
|
||||
Ref: v1.ObjectReference{
|
||||
APIVersion: "v1",
|
||||
},
|
||||
OwnerRefs: []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "v1",
|
||||
Kind: "ConfigMap",
|
||||
Name: "cm-1",
|
||||
},
|
||||
{
|
||||
APIVersion: "v1",
|
||||
Kind: "ConfigMap",
|
||||
Name: "cm-2",
|
||||
},
|
||||
},
|
||||
CreationTimestamp: nil,
|
||||
Info: nil,
|
||||
Resource: nil,
|
||||
})
|
||||
expected := appv1.ResourceNode{
|
||||
ResourceRef: appv1.ResourceRef{
|
||||
Version: "v1",
|
||||
},
|
||||
ParentRefs: []appv1.ResourceRef{
|
||||
{
|
||||
Group: "",
|
||||
Kind: "ConfigMap",
|
||||
Name: "cm-1",
|
||||
},
|
||||
{
|
||||
Group: "",
|
||||
Kind: "ConfigMap",
|
||||
Name: "cm-2",
|
||||
},
|
||||
},
|
||||
Info: nil,
|
||||
NetworkingInfo: nil,
|
||||
ResourceVersion: "",
|
||||
Images: nil,
|
||||
Health: nil,
|
||||
CreatedAt: nil,
|
||||
}
|
||||
assert.Equal(t, expected, resNode)
|
||||
}

controller/cache/info.go (vendored)
@@ -19,12 +19,21 @@ import (
"github.com/argoproj/argo-cd/v2/util/resource"
)

func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo) {
func populateNodeInfo(un *unstructured.Unstructured, res *ResourceInfo, customLabels []string) {
gvk := un.GroupVersionKind()
revision := resource.GetRevision(un)
if revision > 0 {
res.Info = append(res.Info, v1alpha1.InfoItem{Name: "Revision", Value: fmt.Sprintf("Rev:%v", revision)})
}
if len(customLabels) > 0 {
if labels := un.GetLabels(); labels != nil {
for _, customLabel := range customLabels {
if value, ok := labels[customLabel]; ok {
res.Info = append(res.Info, v1alpha1.InfoItem{Name: customLabel, Value: value})
}
}
}
}
switch gvk.Group {
case "":
switch gvk.Kind {

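A minimal sketch of what the new customLabels argument does (the label keys are made up): only labels that are both requested and present on the object become Info items.

    info := &ResourceInfo{}
    populateNodeInfo(un, info, []string{"team", "env"}) // hypothetical label keys
    for _, item := range info.Info {
        fmt.Printf("%s=%s\n", item.Name, item.Value) // e.g. team=payments when that label exists on un
    }
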
controller/cache/info_test.go (vendored)
@@ -271,7 +271,7 @@ func TestGetPodInfo(t *testing.T) {
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(pod, info)
|
||||
populateNodeInfo(pod, info, []string{})
|
||||
assert.Equal(t, []v1alpha1.InfoItem{
|
||||
{Name: "Node", Value: "minikube"},
|
||||
{Name: "Containers", Value: "0/1"},
|
||||
@@ -302,7 +302,7 @@ status:
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(node, info)
|
||||
populateNodeInfo(node, info, []string{})
|
||||
assert.Equal(t, &NodeInfo{
|
||||
Name: "minikube",
|
||||
Capacity: v1.ResourceList{v1.ResourceMemory: resource.MustParse("6091320Ki"), v1.ResourceCPU: resource.MustParse("6")},
|
||||
@@ -312,7 +312,7 @@ status:
|
||||
|
||||
func TestGetServiceInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testService, info)
|
||||
populateNodeInfo(testService, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
|
||||
TargetLabels: map[string]string{"app": "guestbook"},
|
||||
@@ -322,7 +322,7 @@ func TestGetServiceInfo(t *testing.T) {
|
||||
|
||||
func TestGetLinkAnnotatedServiceInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testLinkAnnotatedService, info)
|
||||
populateNodeInfo(testLinkAnnotatedService, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
|
||||
TargetLabels: map[string]string{"app": "guestbook"},
|
||||
@@ -333,7 +333,7 @@ func TestGetLinkAnnotatedServiceInfo(t *testing.T) {
|
||||
|
||||
func TestGetIstioVirtualServiceInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testIstioVirtualService, info)
|
||||
populateNodeInfo(testIstioVirtualService, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
require.NotNil(t, info.NetworkingInfo)
|
||||
require.NotNil(t, info.NetworkingInfo.TargetRefs)
|
||||
@@ -363,7 +363,7 @@ func TestGetIngressInfo(t *testing.T) {
|
||||
}
|
||||
for _, tc := range tests {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(tc.Ingress, info)
|
||||
populateNodeInfo(tc.Ingress, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool {
|
||||
return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0
|
||||
@@ -388,7 +388,7 @@ func TestGetIngressInfo(t *testing.T) {
|
||||
|
||||
func TestGetLinkAnnotatedIngressInfo(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testLinkAnnotatedIngress, info)
|
||||
populateNodeInfo(testLinkAnnotatedIngress, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool {
|
||||
return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0
|
||||
@@ -412,7 +412,7 @@ func TestGetLinkAnnotatedIngressInfo(t *testing.T) {
|
||||
|
||||
func TestGetIngressInfoWildCardPath(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testIngressWildCardPath, info)
|
||||
populateNodeInfo(testIngressWildCardPath, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool {
|
||||
return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0
|
||||
@@ -436,7 +436,7 @@ func TestGetIngressInfoWildCardPath(t *testing.T) {
|
||||
|
||||
func TestGetIngressInfoWithoutTls(t *testing.T) {
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(testIngressWithoutTls, info)
|
||||
populateNodeInfo(testIngressWithoutTls, info, []string{})
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
sort.Slice(info.NetworkingInfo.TargetRefs, func(i, j int) bool {
|
||||
return strings.Compare(info.NetworkingInfo.TargetRefs[j].Name, info.NetworkingInfo.TargetRefs[i].Name) < 0
|
||||
@@ -481,7 +481,7 @@ func TestGetIngressInfoWithHost(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
|
||||
Ingress: []v1.LoadBalancerIngress{{IP: "107.178.210.11"}},
|
||||
@@ -514,7 +514,7 @@ func TestGetIngressInfoNoHost(t *testing.T) {
|
||||
`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{
|
||||
TargetRefs: []v1alpha1.ResourceRef{{
|
||||
@@ -549,7 +549,7 @@ func TestExternalUrlWithSubPath(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
expectedExternalUrls := []string{"https://107.178.210.11/my/sub/path/"}
|
||||
assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs)
|
||||
@@ -585,7 +585,7 @@ func TestExternalUrlWithMultipleSubPaths(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
expectedExternalUrls := []string{"https://helm-guestbook.com/my/sub/path/", "https://helm-guestbook.com/my/sub/path/2", "https://helm-guestbook.com"}
|
||||
actualURLs := info.NetworkingInfo.ExternalURLs
|
||||
@@ -615,7 +615,7 @@ func TestExternalUrlWithNoSubPath(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
expectedExternalUrls := []string{"https://107.178.210.11"}
|
||||
assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs)
|
||||
@@ -643,8 +643,54 @@ func TestExternalUrlWithNetworkingApi(t *testing.T) {
|
||||
- ip: 107.178.210.11`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(ingress, info)
|
||||
populateNodeInfo(ingress, info, []string{})
|
||||
|
||||
expectedExternalUrls := []string{"https://107.178.210.11"}
|
||||
assert.Equal(t, expectedExternalUrls, info.NetworkingInfo.ExternalURLs)
|
||||
}
|
||||
|
||||
func TestCustomLabel(t *testing.T) {
|
||||
configmap := strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cm`)
|
||||
|
||||
info := &ResourceInfo{}
|
||||
populateNodeInfo(configmap, info, []string{"my-label"})
|
||||
|
||||
assert.Equal(t, 0, len(info.Info))
|
||||
|
||||
configmap = strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cm
|
||||
labels:
|
||||
my-label: value`)
|
||||
|
||||
info = &ResourceInfo{}
|
||||
populateNodeInfo(configmap, info, []string{"my-label", "other-label"})
|
||||
|
||||
assert.Equal(t, 1, len(info.Info))
|
||||
assert.Equal(t, "my-label", info.Info[0].Name)
|
||||
assert.Equal(t, "value", info.Info[0].Value)
|
||||
|
||||
configmap = strToUnstructured(`
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cm
|
||||
labels:
|
||||
my-label: value
|
||||
other-label: value2`)
|
||||
|
||||
info = &ResourceInfo{}
|
||||
populateNodeInfo(configmap, info, []string{"my-label", "other-label"})
|
||||
|
||||
assert.Equal(t, 2, len(info.Info))
|
||||
assert.Equal(t, "my-label", info.Info[0].Name)
|
||||
assert.Equal(t, "value", info.Info[0].Value)
|
||||
assert.Equal(t, "other-label", info.Info[1].Name)
|
||||
assert.Equal(t, "value2", info.Info[1].Value)
|
||||
}
|
||||
|
||||
@@ -117,7 +117,7 @@ func (c *clusterInfoUpdater) updateClusterInfo(cluster appv1.Cluster, info *cach
|
||||
}
|
||||
if info != nil {
|
||||
clusterInfo.ServerVersion = info.K8SVersion
|
||||
clusterInfo.APIVersions = argo.APIResourcesToStrings(info.APIResources, false)
|
||||
clusterInfo.APIVersions = argo.APIResourcesToStrings(info.APIResources, true)
|
||||
if info.LastCacheSyncTime == nil {
|
||||
clusterInfo.ConnectionState.Status = appv1.ConnectionStatusUnknown
|
||||
} else if info.SyncError == nil {
|
||||
|
||||
@@ -381,7 +381,7 @@ func (c *appCollector) collectApps(ch chan<- prometheus.Metric, app *argoappv1.A
|
||||
healthStatus = health.HealthStatusUnknown
|
||||
}
|
||||
|
||||
addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.Source.RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace, string(syncStatus), string(healthStatus), operation)
|
||||
addGauge(descAppInfo, 1, git.NormalizeGitURL(app.Spec.GetSource().RepoURL), app.Spec.Destination.Server, app.Spec.Destination.Namespace, string(syncStatus), string(healthStatus), operation)
|
||||
|
||||
if len(c.appLabels) > 0 {
|
||||
labelValues := []string{}
|
||||
|
||||
@@ -62,7 +62,7 @@ type managedResource struct {
|
||||
|
||||
// AppStateManager defines methods which allow to compare application spec and actual application state.
|
||||
type AppStateManager interface {
|
||||
CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revision string, source v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string) *comparisonResult
|
||||
CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) *comparisonResult
|
||||
SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
|
||||
}
|
||||
|
||||
@@ -75,6 +75,8 @@ type comparisonResult struct {
|
||||
reconciliationResult sync.ReconciliationResult
|
||||
diffConfig argodiff.DiffConfig
|
||||
appSourceType v1alpha1.ApplicationSourceType
|
||||
// appSourceTypes stores the SourceType for each application source under sources field
|
||||
appSourceTypes []v1alpha1.ApplicationSourceType
|
||||
// timings maps phases of comparison to the duration it took to complete (for statistical purposes)
|
||||
timings map[string]time.Duration
|
||||
diffResultList *diff.DiffResultList
|
||||
@@ -105,7 +107,8 @@ type appStateManager struct {
|
||||
persistResourceHealth bool
|
||||
}
|
||||
|
||||
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1.ApplicationSource, appLabelKey, revision string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, *apiclient.ManifestResponse, error) {
|
||||
func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, map[*v1alpha1.ApplicationSource]*apiclient.ManifestResponse, error) {
|
||||
|
||||
ts := stats.NewTimingStats()
|
||||
helmRepos, err := m.db.ListHelmRepositories(context.Background())
|
||||
if err != nil {
|
||||
@@ -115,11 +118,7 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("helm_ms")
|
||||
repo, err := m.db.GetRepository(context.Background(), source.RepoURL)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("repo_ms")
|
||||
helmRepositoryCredentials, err := m.db.GetAllHelmRepositoryCredentials(context.Background())
|
||||
if err != nil {
|
||||
@@ -129,15 +128,6 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer io.Close(conn)
|
||||
|
||||
if revision == "" {
|
||||
revision = source.TargetRevision
|
||||
}
|
||||
|
||||
plugins, err := m.settingsMgr.GetConfigManagementPlugins()
|
||||
if err != nil {
|
||||
@@ -158,48 +148,87 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
kustomizeOptions, err := kustomizeSettings.GetOptions(app.Spec.Source)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
helmOptions, err := m.settingsMgr.GetHelmSettings()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("build_options_ms")
|
||||
serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ts.AddCheckpoint("version_ms")
|
||||
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
|
||||
Repo: repo,
|
||||
Repos: permittedHelmRepos,
|
||||
Revision: revision,
|
||||
NoCache: noCache,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &source,
|
||||
Plugins: tools,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
|
||||
VerifySignature: verifySignature,
|
||||
HelmRepoCreds: permittedHelmCredentials,
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
EnabledSourceTypes: enabledSourceTypes,
|
||||
HelmOptions: helmOptions,
|
||||
})
|
||||
conn, repoClient, err := m.repoClientset.NewRepoServerClient()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
targetObjs, err := unmarshalManifests(manifestInfo.Manifests)
|
||||
defer io.Close(conn)
|
||||
|
||||
manifestInfoMap := make(map[*v1alpha1.ApplicationSource]*apiclient.ManifestResponse)
|
||||
targetObjs := make([]*unstructured.Unstructured, 0)
|
||||
|
||||
// Store the map of all sources having ref field into a map for applications with sources field
|
||||
refSources, err := argo.GetRefSources(context.Background(), app.Spec, m.db)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, fmt.Errorf("failed to get ref sources: %v", err)
|
||||
}
|
||||
|
||||
for i, source := range sources {
|
||||
if len(revisions) < len(sources) || revisions[i] == "" {
|
||||
revisions[i] = source.TargetRevision
|
||||
}
|
||||
ts.AddCheckpoint("helm_ms")
|
||||
repo, err := m.db.GetRepository(context.Background(), source.RepoURL)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
kustomizeOptions, err := kustomizeSettings.GetOptions(source)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("version_ms")
|
||||
log.Debugf("Generating Manifest for source %s revision %s", source, revisions[i])
|
||||
manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
|
||||
Repo: repo,
|
||||
Repos: permittedHelmRepos,
|
||||
Revision: revisions[i],
|
||||
NoCache: noCache,
|
||||
NoRevisionCache: noRevisionCache,
|
||||
AppLabelKey: appLabelKey,
|
||||
AppName: app.InstanceName(m.namespace),
|
||||
Namespace: app.Spec.Destination.Namespace,
|
||||
ApplicationSource: &source,
|
||||
Plugins: tools,
|
||||
KustomizeOptions: kustomizeOptions,
|
||||
KubeVersion: serverVersion,
|
||||
ApiVersions: argo.APIResourcesToStrings(apiResources, true),
|
||||
VerifySignature: verifySignature,
|
||||
HelmRepoCreds: permittedHelmCredentials,
|
||||
TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),
|
||||
EnabledSourceTypes: enabledSourceTypes,
|
||||
HelmOptions: helmOptions,
|
||||
HasMultipleSources: app.Spec.HasMultipleSources(),
|
||||
RefSources: refSources,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// GenerateManifest can return empty ManifestResponse without error if app has multiple sources
|
||||
// and if any of the source does not have path and chart field not specified.
|
||||
// In that scenario, we continue to the next source
|
||||
if app.Spec.HasMultipleSources() && len(manifestInfo.Manifests) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
targetObj, err := unmarshalManifests(manifestInfo.Manifests)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
targetObjs = append(targetObjs, targetObj...)
|
||||
manifestInfoMap[&source] = manifestInfo
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("unmarshal_ms")
|
||||
@@ -209,7 +238,7 @@ func (m *appStateManager) getRepoObjs(app *v1alpha1.Application, source v1alpha1
|
||||
}
|
||||
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
|
||||
logCtx.Info("getRepoObjs stats")
|
||||
return targetObjs, manifestInfo, nil
|
||||
return targetObjs, manifestInfoMap, nil
|
||||
}
|
||||
|
||||
func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) {
|
||||
@@ -325,7 +354,7 @@ func verifyGnuPGSignature(revision string, project *appv1.AppProject, manifestIn
|
||||
// CompareAppState compares application git state to the live app state, using the specified
|
||||
// revision and supplied source. If revision or overrides are empty, then compares against
|
||||
// revision and overrides in the app spec.
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revision string, source v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string) *comparisonResult {
|
||||
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *appv1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) *comparisonResult {
|
||||
ts := stats.NewTimingStats()
|
||||
appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
|
||||
|
||||
@@ -333,12 +362,24 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
|
||||
// return unknown comparison result if basic comparison settings cannot be loaded
|
||||
if err != nil {
|
||||
return &comparisonResult{
|
||||
syncStatus: &v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{Source: source, Destination: app.Spec.Destination},
|
||||
Status: appv1.SyncStatusCodeUnknown,
|
||||
},
|
||||
healthStatus: &appv1.HealthStatus{Status: health.HealthStatusUnknown},
|
||||
if hasMultipleSources {
|
||||
return &comparisonResult{
|
||||
syncStatus: &v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{Destination: app.Spec.Destination, Sources: sources},
|
||||
Status: appv1.SyncStatusCodeUnknown,
|
||||
Revisions: revisions,
|
||||
},
|
||||
healthStatus: &appv1.HealthStatus{Status: health.HealthStatusUnknown},
|
||||
}
|
||||
} else {
|
||||
return &comparisonResult{
|
||||
syncStatus: &v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{Source: sources[0], Destination: app.Spec.Destination},
|
||||
Status: appv1.SyncStatusCodeUnknown,
|
||||
Revision: revisions[0],
|
||||
},
|
||||
healthStatus: &appv1.HealthStatus{Status: health.HealthStatusUnknown},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -356,11 +397,21 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)
|
||||
|
||||
var targetObjs []*unstructured.Unstructured
|
||||
var manifestInfo *apiclient.ManifestResponse
|
||||
now := metav1.Now()
|
||||
|
||||
var manifestInfoMap map[*v1alpha1.ApplicationSource]*apiclient.ManifestResponse
|
||||
|
||||
if len(localManifests) == 0 {
|
||||
targetObjs, manifestInfo, err = m.getRepoObjs(app, source, appLabelKey, revision, noCache, noRevisionCache, verifySignature, project)
|
||||
// If the length of revisions is not same as the length of sources,
|
||||
// we take the revisions from the sources directly for all the sources.
|
||||
if len(revisions) != len(sources) {
|
||||
revisions = make([]string, 0)
|
||||
for _, source := range sources {
|
||||
revisions = append(revisions, source.TargetRevision)
|
||||
}
|
||||
}
|
||||
|
||||
targetObjs, manifestInfoMap, err = m.getRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project)
|
||||
if err != nil {
|
||||
targetObjs = make([]*unstructured.Unstructured, 0)
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
|
||||
@@ -382,7 +433,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
failedToLoadObjs = true
|
||||
}
|
||||
}
|
||||
manifestInfo = nil
|
||||
// empty out manifestInfoMap
|
||||
for as := range manifestInfoMap {
|
||||
delete(manifestInfoMap, as)
|
||||
}
|
||||
}
|
||||
ts.AddCheckpoint("git_ms")
|
||||
|
||||
@@ -416,6 +470,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
|
||||
failedToLoadObjs = true
|
||||
}
|
||||
|
||||
logCtx.Debugf("Retrieved lived manifests")
|
||||
|
||||
// filter out all resources which are not permitted in the application project
|
||||
@@ -459,10 +514,16 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
log.Warnf("Could not get compare options from ConfigMap (assuming defaults): %v", err)
|
||||
compareOptions = settings.GetDefaultDiffOptions()
|
||||
}
|
||||
manifestRevisions := make([]string, 0)
|
||||
|
||||
for _, manifestInfo := range manifestInfoMap {
|
||||
manifestRevisions = append(manifestRevisions, manifestInfo.Revision)
|
||||
}
|
||||
|
||||
// restore comparison using cached diff result if previous comparison was performed for the same revision
|
||||
revisionChanged := manifestInfo == nil || app.Status.Sync.Revision != manifestInfo.Revision
|
||||
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, appv1.ComparedTo{Source: app.Spec.Source, Destination: app.Spec.Destination})
|
||||
revisionChanged := len(manifestInfoMap) != len(sources) || !reflect.DeepEqual(app.Status.Sync.Revisions, manifestRevisions)
|
||||
specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, appv1.ComparedTo{Source: app.Spec.GetSource(), Destination: app.Spec.Destination, Sources: sources})
|
||||
|
||||
_, refreshRequested := app.IsRefreshRequested()
|
||||
noCache = noCache || refreshRequested || app.Status.Expired(m.statusRefreshTimeout) || specChanged || revisionChanged
|
||||
|
||||
@@ -514,7 +575,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
}
|
||||
gvk := obj.GroupVersionKind()
|
||||
|
||||
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, appLabelKey, trackingMethod)
|
||||
isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod)
|
||||
|
||||
resState := v1alpha1.ResourceStatus{
|
||||
Namespace: obj.GetNamespace(),
|
||||
@@ -591,16 +652,32 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
if failedToLoadObjs {
|
||||
syncCode = v1alpha1.SyncStatusCodeUnknown
|
||||
}
|
||||
syncStatus := v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{
|
||||
Source: source,
|
||||
Destination: app.Spec.Destination,
|
||||
},
|
||||
Status: syncCode,
|
||||
var revision string
|
||||
|
||||
if !hasMultipleSources && len(manifestRevisions) > 0 {
|
||||
revision = manifestRevisions[0]
|
||||
}
|
||||
if manifestInfo != nil {
|
||||
syncStatus.Revision = manifestInfo.Revision
|
||||
var syncStatus v1alpha1.SyncStatus
|
||||
if hasMultipleSources {
|
||||
syncStatus = v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
Sources: sources,
|
||||
},
|
||||
Status: syncCode,
|
||||
Revisions: manifestRevisions,
|
||||
}
|
||||
} else {
|
||||
syncStatus = v1alpha1.SyncStatus{
|
||||
ComparedTo: appv1.ComparedTo{
|
||||
Destination: app.Spec.Destination,
|
||||
Source: app.Spec.GetSource(),
|
||||
},
|
||||
Status: syncCode,
|
||||
Revision: revision,
|
||||
}
|
||||
}
|
||||
|
||||
ts.AddCheckpoint("sync_ms")
|
||||
|
||||
healthStatus, err := setApplicationHealth(managedResources, resourceSummaries, resourceOverrides, app, m.persistResourceHealth)
|
||||
@@ -611,8 +688,10 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
// Git has already performed the signature verification via its GPG interface, and the result is available
|
||||
// in the manifest info received from the repository server. We now need to form our opinion about the result
|
||||
// and stop processing if we do not agree about the outcome.
|
||||
if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil {
|
||||
conditions = append(conditions, verifyGnuPGSignature(revision, project, manifestInfo)...)
|
||||
for _, manifestInfo := range manifestInfoMap {
|
||||
if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil {
|
||||
conditions = append(conditions, verifyGnuPGSignature(manifestInfo.Revision, project, manifestInfo)...)
|
||||
}
|
||||
}
|
||||
|
||||
compRes := comparisonResult{
|
||||
@@ -624,9 +703,18 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
diffConfig: diffConfig,
|
||||
diffResultList: diffResults,
|
||||
}
|
||||
if manifestInfo != nil {
|
||||
compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
|
||||
|
||||
if hasMultipleSources {
|
||||
for _, manifestInfo := range manifestInfoMap {
|
||||
compRes.appSourceTypes = append(compRes.appSourceTypes, appv1.ApplicationSourceType(manifestInfo.SourceType))
|
||||
}
|
||||
} else {
|
||||
for _, manifestInfo := range manifestInfoMap {
|
||||
compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
app.Status.SetConditions(conditions, map[appv1.ApplicationConditionType]bool{
|
||||
appv1.ApplicationConditionComparisonError: true,
|
||||
appv1.ApplicationConditionSharedResourceWarning: true,
|
||||
@@ -638,18 +726,29 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *ap
|
||||
return &compRes
|
||||
}
|
||||
|
||||
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, startedAt metav1.Time) error {
|
||||
func (m *appStateManager) persistRevisionHistory(app *v1alpha1.Application, revision string, source v1alpha1.ApplicationSource, revisions []string, sources []v1alpha1.ApplicationSource, hasMultipleSources bool, startedAt metav1.Time) error {
|
||||
var nextID int64
|
||||
if len(app.Status.History) > 0 {
|
||||
nextID = app.Status.History.LastRevisionHistory().ID + 1
|
||||
}
|
||||
app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
|
||||
Revision: revision,
|
||||
DeployedAt: metav1.NewTime(time.Now().UTC()),
|
||||
DeployStartedAt: &startedAt,
|
||||
ID: nextID,
|
||||
Source: source,
|
||||
})
|
||||
|
||||
if hasMultipleSources {
|
||||
app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
|
||||
DeployedAt: metav1.NewTime(time.Now().UTC()),
|
||||
DeployStartedAt: &startedAt,
|
||||
ID: nextID,
|
||||
Sources: sources,
|
||||
Revisions: revisions,
|
||||
})
|
||||
} else {
|
||||
app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
|
||||
Revision: revision,
|
||||
DeployedAt: metav1.NewTime(time.Now().UTC()),
|
||||
DeployStartedAt: &startedAt,
|
||||
ID: nextID,
|
||||
Source: source,
|
||||
})
|
||||
}
|
||||
|
||||
app.Status.History = app.Status.History.Trunc(app.Spec.GetRevisionHistoryLimit())
|
||||
|
||||
@@ -699,12 +798,13 @@ func NewAppStateManager(
|
||||
}
|
||||
|
||||
// isSelfReferencedObj returns whether the given obj is managed by the application
|
||||
// according to the values in the tracking annotation. It returns true when all
|
||||
// of the properties in the annotation (name, namespace, group and kind) match
|
||||
// the properties of the inspected object, or if the tracking method used does
|
||||
// not provide the required properties for matching.
|
||||
func (m *appStateManager) isSelfReferencedObj(obj *unstructured.Unstructured, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
|
||||
if obj == nil {
|
||||
// according to the values of the tracking id (aka app instance value) annotation.
|
||||
// It returns true when all of the properties of the tracking id (app name, namespace,
|
||||
// group and kind) match the properties of the live object, or if the tracking method
|
||||
// used does not provide the required properties for matching.
|
||||
// Reference: https://github.com/argoproj/argo-cd/issues/8683
|
||||
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
|
||||
if live == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -714,17 +814,42 @@ func (m *appStateManager) isSelfReferencedObj(obj *unstructured.Unstructured, ap
return true
}

// In order for us to assume obj to be managed by this application, the
// values from the annotation have to match the properties from the live
// object. Cluster scoped objects carry the app's destination namespace
// in the tracking annotation, but are unique in GVK + name combination.
appInstance := m.resourceTracking.GetAppInstance(obj, appLabelKey, trackingMethod)
if appInstance != nil {
return (obj.GetNamespace() == appInstance.Namespace || obj.GetNamespace() == "") &&
obj.GetName() == appInstance.Name &&
obj.GetObjectKind().GroupVersionKind().Group == appInstance.Group &&
obj.GetObjectKind().GroupVersionKind().Kind == appInstance.Kind
// config != nil is the best-case scenario for constructing an accurate
// Tracking ID. `config` is the "desired state" (from git/helm/etc.).
// Using the desired state is important when there is an ApiGroup upgrade.
// When upgrading, the comparison must be made with the new tracking ID.
// Example:
// live resource annotation will be:
// ingress-app:extensions/Ingress:default/some-ingress
// when it should be:
// ingress-app:networking.k8s.io/Ingress:default/some-ingress
// More details in: https://github.com/argoproj/argo-cd/pull/11012
var aiv argo.AppInstanceValue
if config != nil {
aiv = argo.UnstructuredToAppInstanceValue(config, appName, "")
return isSelfReferencedObj(live, aiv)
}

// If config is nil then compare the live resource with the value
// of the annotation. In this case, in order to validate if obj is
// managed by this application, the values from the annotation have
// to match the properties from the live object. Cluster scoped objects
// carry the app's destination namespace in the tracking annotation,
// but are unique in GVK + name combination.
appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod)
if appInstance != nil {
return isSelfReferencedObj(live, *appInstance)
}
return true
}

// isSelfReferencedObj returns true if the given Tracking ID (`aiv`) matches
// the given object. It returns false when the ID doesn't match. This sometimes
// happens when a tracking label or annotation gets accidentally copied to a
// different resource.
func isSelfReferencedObj(obj *unstructured.Unstructured, aiv argo.AppInstanceValue) bool {
return (obj.GetNamespace() == aiv.Namespace || obj.GetNamespace() == "") &&
obj.GetName() == aiv.Name &&
obj.GetObjectKind().GroupVersionKind().Group == aiv.Group &&
obj.GetObjectKind().GroupVersionKind().Kind == aiv.Kind
}

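A small sketch of the split above, using the Ingress example from the comment (object values are illustrative): when the desired-state object is available, the tracking id is rebuilt from it, so the comparison runs against the upgraded API group instead of the stale annotation on the live resource.

    // Hypothetical tracking id built from the desired (config) object:
    aiv := argo.UnstructuredToAppInstanceValue(config, "ingress-app", "")
    // aiv now describes ingress-app + networking.k8s.io/Ingress default/some-ingress, taken from
    // the desired state; the stale extensions/Ingress value in the live object's annotation is
    // never consulted, so the object is still recognised as belonging to ingress-app after the
    // API group upgrade.
    _ = isSelfReferencedObj(live, aiv)
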
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -37,7 +38,11 @@ func TestCompareAppStateEmpty(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -60,7 +65,11 @@ func TestCompareAppStateMissing(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
@@ -87,7 +96,11 @@ func TestCompareAppStateExtra(t *testing.T) {
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeOutOfSync, compRes.syncStatus.Status)
|
||||
assert.Equal(t, 1, len(compRes.resources))
|
||||
@@ -113,7 +126,11 @@ func TestCompareAppStateHook(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
assert.Equal(t, 0, len(compRes.resources))
|
||||
@@ -140,7 +157,11 @@ func TestCompareAppStateSkipHook(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
assert.Equal(t, 1, len(compRes.resources))
|
||||
@@ -166,7 +187,11 @@ func TestCompareAppStateCompareOptionIgnoreExtraneous(t *testing.T) {
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -194,7 +219,11 @@ func TestCompareAppStateExtraHook(t *testing.T) {
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -237,7 +266,11 @@ func TestCompareAppStateDuplicatedNamespacedResources(t *testing.T) {
|
||||
},
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.NotNil(t, compRes)
|
||||
assert.Equal(t, 1, len(app.Status.Conditions))
|
||||
@@ -288,7 +321,11 @@ func TestSetHealth(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
|
||||
}
|
||||
@@ -320,7 +357,11 @@ func TestSetHealthSelfReferencedApp(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.Equal(t, health.HealthStatusHealthy, compRes.healthStatus.Status)
|
||||
}
|
||||
@@ -390,7 +431,11 @@ func TestReturnUnknownComparisonStateOnSettingLoadError(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
|
||||
assert.Equal(t, health.HealthStatusUnknown, compRes.healthStatus.Status)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
|
||||
@@ -437,7 +482,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
|
||||
app.Spec.RevisionHistoryLimit = &i
|
||||
}
|
||||
addHistory := func() {
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, metav1.Time{})
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1.Time{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
addHistory()
|
||||
@@ -473,7 +518,7 @@ func Test_appStateManager_persistRevisionHistory(t *testing.T) {
|
||||
assert.Len(t, app.Status.History, 9)
|
||||
|
||||
metav1NowTime := metav1.NewTime(time.Now())
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, metav1NowTime)
|
||||
err := manager.persistRevisionHistory(app, "my-revision", argoappv1.ApplicationSource{}, []string{}, []argoappv1.ApplicationSource{}, false, metav1NowTime)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, app.Status.History.LastRevisionHistory().DeployStartedAt, &metav1NowTime)
|
||||
}
|
||||
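persistRevisionHistory gains the same multi-source parameters. A sketch of how a call for a hypothetical multi-source app might look (illustrative values, not taken from the diff):

```go
// Sketch only: revisions[i] corresponds to sources[i]; the boolean marks a multi-source app.
err := manager.persistRevisionHistory(
	app,
	"abc123",                      // single-source revision (still passed for compatibility)
	argoappv1.ApplicationSource{}, // single-source source
	[]string{"abc123", "def456"},  // one revision per source (illustrative)
	app.Spec.Sources,              // the sources being deployed
	true,                          // hasMultipleSources
	metav1.Now(),
)
assert.NoError(t, err)
```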
@@ -527,7 +572,11 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -549,7 +598,11 @@ func TestSignedResponseNoSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &defaultProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -578,7 +631,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -600,7 +657,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -622,7 +683,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -644,7 +709,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -669,7 +738,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
ctrl := newFakeController(&data)
|
||||
testProj := signedProj
|
||||
testProj.Spec.SignatureKeys[0].KeyID = "4AEE18F83AFDEB24"
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &testProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &testProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -694,7 +767,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
// it doesn't matter for our test whether local manifests are valid
|
||||
localManifests := []string{"foobar"}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, localManifests)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeUnknown, compRes.syncStatus.Status)
|
||||
@@ -719,7 +796,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
managedLiveObjs: make(map[kube.ResourceKey]*unstructured.Unstructured),
|
||||
}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, nil)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, nil, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -728,7 +809,7 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
assert.Len(t, app.Status.Conditions, 0)
|
||||
}
|
||||
|
||||
// Signature required and local manifests supplied and GPG subystem is disabled - sync
|
||||
// Signature required and local manifests supplied and GPG subsystem is disabled - sync
|
||||
{
|
||||
app := newFakeApp()
|
||||
data := fakeData{
|
||||
@@ -744,7 +825,11 @@ func TestSignedResponseSignatureRequired(t *testing.T) {
|
||||
// it doesn't matter for our test whether local manifests are valid
|
||||
localManifests := []string{""}
|
||||
ctrl := newFakeController(&data)
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, "abc123", app.Spec.Source, false, false, localManifests)
|
||||
sources := make([]argoappv1.ApplicationSource, 0)
|
||||
sources = append(sources, app.Spec.GetSource())
|
||||
revisions := make([]string, 0)
|
||||
revisions = append(revisions, "abc123")
|
||||
compRes := ctrl.appStateManager.CompareAppState(app, &signedProj, revisions, sources, false, false, localManifests, false)
|
||||
assert.NotNil(t, compRes)
|
||||
assert.NotNil(t, compRes.syncStatus)
|
||||
assert.Equal(t, argoappv1.SyncStatusCodeSynced, compRes.syncStatus.Status)
|
||||
@@ -852,6 +937,19 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
},
|
||||
},
|
||||
})
|
||||
managedWrongAPIGroup := kube.MustToUnstructured(&networkingv1.Ingress{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "networking.k8s.io/v1",
|
||||
Kind: "Ingress",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "some-ingress",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
common.AnnotationKeyAppInstance: "guestbook:extensions/Ingress:default/some-ingress",
|
||||
},
|
||||
},
|
||||
})
|
||||
ctrl := newFakeController(&fakeData{
|
||||
apps: []runtime.Object{app, &defaultProj},
|
||||
manifestResponse: &apiclient.ManifestResponse{
|
||||
@@ -870,30 +968,69 @@ func TestIsLiveResourceManaged(t *testing.T) {
|
||||
})
|
||||
|
||||
manager := ctrl.appStateManager.(*appStateManager)
|
||||
appName := "guestbook"
|
||||
|
||||
// Managed resource w/ annotations
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
t.Run("will return true if trackingid matches the resource", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
configObj := managedObj.DeepCopy()
|
||||
|
||||
// Managed resource w/ label
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObj, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will return true if tracked with label", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
configObj := managedObjWithLabel.DeepCopy()
|
||||
|
||||
// Wrong resource name
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedObjWithLabel, configObj, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong resource name and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// Wrong resource group
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongName, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong resource group and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// Wrong resource kind
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongGroup, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong kind and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// Wrong resource namespace
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongKind, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
t.Run("will handle if trackingId has wrong namespace and config is nil", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
|
||||
// Nil resource
|
||||
assert.True(t, manager.isSelfReferencedObj(nil, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodLabel))
|
||||
assert.False(t, manager.isSelfReferencedObj(unmanagedObjWrongNamespace, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotationAndLabel))
|
||||
})
|
||||
t.Run("will return true if live is nil", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
assert.True(t, manager.isSelfReferencedObj(nil, nil, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
|
||||
t.Run("will handle upgrade in desired state APIGroup", func(t *testing.T) {
|
||||
// given
|
||||
t.Parallel()
|
||||
config := managedWrongAPIGroup.DeepCopy()
|
||||
delete(config.GetAnnotations(), common.AnnotationKeyAppInstance)
|
||||
|
||||
// then
|
||||
assert.True(t, manager.isSelfReferencedObj(managedWrongAPIGroup, config, appName, common.AnnotationKeyAppInstance, argo.TrackingMethodAnnotation))
|
||||
})
|
||||
}
|
||||
|
||||
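As the subtests above exercise, `isSelfReferencedObj` now also receives the desired-state (config) object and the application name, so tracking can fall back to the config when the live object is missing or its tracking id does not match. A hedged sketch of the new call shape (variable names illustrative):

```go
// Sketch only: argument order as used in the tests above.
selfRef := manager.isSelfReferencedObj(
	liveObj,                         // live cluster object (may be nil)
	configObj,                       // desired-state object (may be nil)
	"guestbook",                     // application name
	common.AnnotationKeyAppInstance, // instance label/annotation key
	argo.TrackingMethodAnnotation,   // tracking method in use
)
_ = selfRef
```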
@@ -9,6 +9,8 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
cdcommon "github.com/argoproj/argo-cd/v2/common"
|
||||
|
||||
"github.com/argoproj/gitops-engine/pkg/sync"
|
||||
"github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"github.com/argoproj/gitops-engine/pkg/utils/kube"
|
||||
@@ -20,7 +22,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/managedfields"
|
||||
"k8s.io/kubectl/pkg/util/openapi"
|
||||
|
||||
cdcommon "github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/controller/metrics"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
listersv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
|
||||
@@ -65,6 +66,8 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
var syncOp v1alpha1.SyncOperation
|
||||
var syncRes *v1alpha1.SyncOperationResult
|
||||
var source v1alpha1.ApplicationSource
|
||||
var sources []v1alpha1.ApplicationSource
|
||||
revisions := make([]string, 0)
|
||||
|
||||
if state.Operation.Sync == nil {
|
||||
state.Phase = common.OperationFailed
|
||||
@@ -82,31 +85,53 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return
|
||||
}
|
||||
|
||||
if syncOp.Source == nil {
|
||||
// normal sync case (where source is taken from app.spec.source)
|
||||
source = app.Spec.Source
|
||||
if syncOp.Source == nil || (syncOp.Sources != nil && len(syncOp.Sources) > 0) {
|
||||
// normal sync case (where source is taken from app.spec.sources)
|
||||
if app.Spec.HasMultipleSources() {
|
||||
sources = app.Spec.Sources
|
||||
} else {
|
||||
// normal sync case (where source is taken from app.spec.source)
|
||||
source = app.Spec.GetSource()
|
||||
sources = make([]v1alpha1.ApplicationSource, 0)
|
||||
}
|
||||
} else {
|
||||
// rollback case
|
||||
source = *state.Operation.Sync.Source
|
||||
if app.Spec.HasMultipleSources() {
|
||||
sources = state.Operation.Sync.Sources
|
||||
} else {
|
||||
source = *state.Operation.Sync.Source
|
||||
sources = make([]v1alpha1.ApplicationSource, 0)
|
||||
}
|
||||
}
|
||||
|
||||
if state.SyncResult != nil {
|
||||
syncRes = state.SyncResult
|
||||
revision = state.SyncResult.Revision
|
||||
revisions = append(revisions, state.SyncResult.Revisions...)
|
||||
} else {
|
||||
syncRes = &v1alpha1.SyncOperationResult{}
|
||||
// status.operationState.syncResult.source. must be set properly since auto-sync relies
|
||||
// on this information to decide if it should sync (if source is different than the last
|
||||
// sync attempt)
|
||||
syncRes.Source = source
|
||||
if app.Spec.HasMultipleSources() {
|
||||
syncRes.Sources = sources
|
||||
} else {
|
||||
syncRes.Source = source
|
||||
}
|
||||
state.SyncResult = syncRes
|
||||
}
|
||||
|
||||
if revision == "" {
|
||||
// if we get here, it means we did not remember a commit SHA which we should be syncing to.
|
||||
// This typically indicates we are just about to begin a brand new sync/rollback operation.
|
||||
// Take the value in the requested operation. We will resolve this to a SHA later.
|
||||
revision = syncOp.Revision
|
||||
// if we get here, it means we did not remember a commit SHA which we should be syncing to.
|
||||
// This typically indicates we are just about to begin a brand new sync/rollback operation.
|
||||
// Take the value in the requested operation. We will resolve this to a SHA later.
|
||||
if app.Spec.HasMultipleSources() {
|
||||
if len(revisions) != len(sources) {
|
||||
revisions = syncOp.Revisions
|
||||
}
|
||||
} else {
|
||||
if revision == "" {
|
||||
revision = syncOp.Revision
|
||||
}
|
||||
}
|
||||
|
||||
proj, err := argo.GetAppProject(app, listersv1alpha1.NewAppProjectLister(m.projInformer.GetIndexer()), m.namespace, m.settingsMgr, m.db, context.TODO())
|
||||
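One detail worth noting in the hunk above: for multi-source apps the controller keeps `revisions` and `sources` index-aligned, falling back to the revisions requested in the operation when the remembered list does not line up. A hedged sketch of that invariant (example values are illustrative):

```go
// Sketch only: revisions[i] is the revision that sources[i] should be synced to.
sources := app.Spec.Sources             // e.g. a Helm chart repo plus a values repo
revisions := state.SyncResult.Revisions // revisions remembered from a previous attempt, if any
if len(revisions) != len(sources) {
	// nothing usable was remembered: take the revisions requested in the operation
	revisions = syncOp.Revisions
}
_ = revisions
```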
@@ -116,10 +141,23 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
return
|
||||
}
|
||||
|
||||
compareResult := m.CompareAppState(app, proj, revision, source, false, true, syncOp.Manifests)
|
||||
if app.Spec.HasMultipleSources() {
|
||||
revisions = syncRes.Revisions
|
||||
} else {
|
||||
revisions = append(revisions, revision)
|
||||
}
|
||||
|
||||
if !app.Spec.HasMultipleSources() {
|
||||
sources = []v1alpha1.ApplicationSource{source}
|
||||
revisions = []string{revision}
|
||||
}
|
||||
|
||||
compareResult := m.CompareAppState(app, proj, revisions, sources, false, true, syncOp.Manifests, app.Spec.HasMultipleSources())
|
||||
// We now have a concrete commit SHA. Save this in the sync result revision so that we remember
|
||||
// what we should be syncing to when resuming operations.
|
||||
|
||||
syncRes.Revision = compareResult.syncStatus.Revision
|
||||
syncRes.Revisions = compareResult.syncStatus.Revisions
|
||||
|
||||
// If there are any comparison or spec errors error conditions do not perform the operation
|
||||
if errConditions := app.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{
|
||||
@@ -212,14 +250,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
}
|
||||
trackingMethod := argo.GetTrackingMethod(m.settingsMgr)
|
||||
|
||||
syncCtx, cleanup, err := sync.NewSyncContext(
|
||||
compareResult.syncStatus.Revision,
|
||||
reconciliationResult,
|
||||
restConfig,
|
||||
rawConfig,
|
||||
m.kubectl,
|
||||
app.Spec.Destination.Namespace,
|
||||
openAPISchema,
|
||||
opts := []sync.SyncOpt{
|
||||
sync.WithLogr(logutils.NewLogrusLogger(logEntry)),
|
||||
sync.WithHealthOverride(lua.ResourceHealthOverrides(resourceOverrides)),
|
||||
sync.WithPermissionValidator(func(un *unstructured.Unstructured, res *v1.APIResource) error {
|
||||
@@ -246,16 +277,9 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
sync.WithResourcesFilter(func(key kube.ResourceKey, target *unstructured.Unstructured, live *unstructured.Unstructured) bool {
|
||||
return (len(syncOp.Resources) == 0 ||
|
||||
argo.ContainsSyncResource(key.Name, key.Namespace, schema.GroupVersionKind{Kind: key.Kind, Group: key.Group}, syncOp.Resources)) &&
|
||||
m.isSelfReferencedObj(live, appLabelKey, trackingMethod)
|
||||
m.isSelfReferencedObj(live, target, app.GetName(), appLabelKey, trackingMethod)
|
||||
}),
|
||||
sync.WithManifestValidation(!syncOp.SyncOptions.HasOption(common.SyncOptionsDisableValidation)),
|
||||
sync.WithNamespaceCreation(syncOp.SyncOptions.HasOption("CreateNamespace=true"), func(un *unstructured.Unstructured) bool {
|
||||
if un != nil && kube.GetAppInstanceLabel(un, cdcommon.LabelKeyAppInstance) != "" {
|
||||
kube.UnsetLabel(un, cdcommon.LabelKeyAppInstance)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}),
|
||||
sync.WithSyncWaveHook(delayBetweenSyncWaves),
|
||||
sync.WithPruneLast(syncOp.SyncOptions.HasOption(common.SyncOptionPruneLast)),
|
||||
sync.WithResourceModificationChecker(syncOp.SyncOptions.HasOption("ApplyOutOfSyncOnly=true"), compareResult.diffResultList),
|
||||
@@ -263,6 +287,21 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
sync.WithReplace(syncOp.SyncOptions.HasOption(common.SyncOptionReplace)),
|
||||
sync.WithServerSideApply(syncOp.SyncOptions.HasOption(common.SyncOptionServerSideApply)),
|
||||
sync.WithServerSideApplyManager(cdcommon.ArgoCDSSAManager),
|
||||
}
|
||||
|
||||
if syncOp.SyncOptions.HasOption("CreateNamespace=true") {
|
||||
opts = append(opts, sync.WithNamespaceModifier(syncNamespace(m.resourceTracking, appLabelKey, trackingMethod, app.Name, app.Spec.SyncPolicy)))
|
||||
}
|
||||
|
||||
syncCtx, cleanup, err := sync.NewSyncContext(
|
||||
compareResult.syncStatus.Revision,
|
||||
reconciliationResult,
|
||||
restConfig,
|
||||
rawConfig,
|
||||
m.kubectl,
|
||||
app.Spec.Destination.Namespace,
|
||||
openAPISchema,
|
||||
opts...,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
@@ -301,7 +340,7 @@ func (m *appStateManager) SyncAppState(app *v1alpha1.Application, state *v1alpha
|
||||
logEntry.WithField("duration", time.Since(start)).Info("sync/terminate complete")
|
||||
|
||||
if !syncOp.DryRun && len(syncOp.Resources) == 0 && state.Phase.Successful() {
|
||||
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, state.StartedAt)
|
||||
err := m.persistRevisionHistory(app, compareResult.syncStatus.Revision, source, compareResult.syncStatus.Revisions, compareResult.syncStatus.ComparedTo.Sources, app.Spec.HasMultipleSources(), state.StartedAt)
|
||||
if err != nil {
|
||||
state.Phase = common.OperationError
|
||||
state.Message = fmt.Sprintf("failed to record sync to history: %v", err)
|
||||
|
||||
controller/sync_namespace.go (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
gitopscommon "github.com/argoproj/gitops-engine/pkg/sync/common"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
)
|
||||
|
||||
// syncNamespace determines if Argo CD should create and/or manage the namespace
|
||||
// where the application will be deployed.
|
||||
func syncNamespace(resourceTracking argo.ResourceTracking, appLabelKey string, trackingMethod v1alpha1.TrackingMethod, appName string, syncPolicy *v1alpha1.SyncPolicy) func(m, l *unstructured.Unstructured) (bool, error) {
|
||||
// This function must return true for the managed namespace to be synced.
|
||||
return func(managedNs, liveNs *unstructured.Unstructured) (bool, error) {
|
||||
if managedNs == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
isNewNamespace := liveNs == nil
|
||||
isManagedNamespace := syncPolicy != nil && syncPolicy.ManagedNamespaceMetadata != nil
|
||||
|
||||
// should only sync the namespace if it doesn't exist in k8s or if
|
||||
// syncPolicy is defined to manage the metadata
|
||||
if !isManagedNamespace && !isNewNamespace {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if isManagedNamespace {
|
||||
managedNamespaceMetadata := syncPolicy.ManagedNamespaceMetadata
|
||||
managedNs.SetLabels(managedNamespaceMetadata.Labels)
|
||||
// managedNamespaceMetadata relies on SSA in order to avoid overriding
|
||||
// existing labels and annotations in namespaces
|
||||
managedNs.SetAnnotations(appendSSAAnnotation(managedNamespaceMetadata.Annotations))
|
||||
}
|
||||
|
||||
// TODO: https://github.com/argoproj/argo-cd/issues/11196
|
||||
// err := resourceTracking.SetAppInstance(managedNs, appLabelKey, appName, "", trackingMethod)
|
||||
// if err != nil {
|
||||
// return false, fmt.Errorf("failed to set app instance tracking on the namespace %s: %s", managedNs.GetName(), err)
|
||||
// }
|
||||
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// appendSSAAnnotation will set the managed namespace to be synced
|
||||
// with server-side apply
|
||||
func appendSSAAnnotation(in map[string]string) map[string]string {
|
||||
r := map[string]string{}
|
||||
for k, v := range in {
|
||||
r[k] = v
|
||||
}
|
||||
r[gitopscommon.AnnotationSyncOptions] = gitopscommon.SyncOptionServerSideApply
|
||||
return r
|
||||
}
|
||||
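For context, a minimal illustration (not part of the diff) of what `appendSSAAnnotation` produces; the resulting annotation matches the assertions in the new tests below:

```go
// Sketch: the managed namespace's annotations gain the server-side-apply sync option.
in := map[string]string{"team": "platform"} // hypothetical input annotations
out := appendSSAAnnotation(in)
// out["team"] == "platform"
// out["argocd.argoproj.io/sync-options"] == "ServerSideApply=true"
_ = out
```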
controller/sync_namespace_test.go (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"github.com/argoproj/argo-cd/v2/common"
|
||||
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
|
||||
"github.com/argoproj/argo-cd/v2/util/argo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func createFakeNamespace(uid string, resourceVersion string, labels map[string]string, annotations map[string]string) *unstructured.Unstructured {
|
||||
un := unstructured.Unstructured{}
|
||||
un.SetUID(types.UID(uid))
|
||||
un.SetResourceVersion(resourceVersion)
|
||||
un.SetLabels(labels)
|
||||
un.SetAnnotations(annotations)
|
||||
un.SetKind("Namespace")
|
||||
un.SetName("some-namespace")
|
||||
return &un
|
||||
}
|
||||
|
||||
func Test_shouldNamespaceSync(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
syncPolicy *v1alpha1.SyncPolicy
|
||||
managedNs *unstructured.Unstructured
|
||||
liveNs *unstructured.Unstructured
|
||||
expected bool
|
||||
expectedLabels map[string]string
|
||||
expectedAnnotations map[string]string
|
||||
}{
|
||||
{
|
||||
name: "liveNs is nil and syncPolicy is nil",
|
||||
expected: false,
|
||||
managedNs: nil,
|
||||
liveNs: nil,
|
||||
syncPolicy: nil,
|
||||
},
|
||||
{
|
||||
name: "liveNs is nil and syncPolicy is not nil",
|
||||
expected: false,
|
||||
managedNs: nil,
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "liveNs is nil and syncPolicy has labels and annotations",
|
||||
expected: false,
|
||||
managedNs: nil,
|
||||
liveNs: nil,
|
||||
expectedLabels: map[string]string{"my-cool-label": "some-value"},
|
||||
expectedAnnotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{"my-cool-label": "some-value"},
|
||||
Annotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata nil",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{},
|
||||
expectedAnnotations: map[string]string{},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata not nil",
|
||||
expected: true,
|
||||
expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata has empty labels map",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{},
|
||||
expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata has empty annotations map",
|
||||
expected: true,
|
||||
expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata has empty annotations and labels map",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{},
|
||||
expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{},
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata has labels",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{"my-cool-label": "some-value"},
|
||||
expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{"my-cool-label": "some-value"},
|
||||
Annotations: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata has annotations",
|
||||
expected: true,
|
||||
expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: nil,
|
||||
Annotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace does not yet exist and managedNamespaceMetadata has annotations and labels",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{"my-cool-label": "some-value"},
|
||||
expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: nil,
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{"my-cool-label": "some-value"},
|
||||
Annotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace exists with no labels or annotations and managedNamespaceMetadata has labels",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{"my-cool-label": "some-value"},
|
||||
expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: createFakeNamespace("something", "1", map[string]string{}, map[string]string{}),
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{"my-cool-label": "some-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace exists with no labels or annotations and managedNamespaceMetadata has annotations",
|
||||
expected: true,
|
||||
expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: createFakeNamespace("something", "1", map[string]string{}, map[string]string{}),
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Annotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace exists with no labels or annotations and managedNamespaceMetadata has annotations and labels",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{"my-cool-label": "some-value"},
|
||||
expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: createFakeNamespace("something", "1", map[string]string{}, map[string]string{}),
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{"my-cool-label": "some-value"},
|
||||
Annotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace exists with labels and managedNamespaceMetadata has mismatching labels",
|
||||
expected: true,
|
||||
expectedAnnotations: map[string]string{"argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
expectedLabels: map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: createFakeNamespace("something", "1", map[string]string{"my-cool-label": "some-value"}, map[string]string{}),
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"},
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace exists with annotations and managedNamespaceMetadata has mismatching annotations",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{},
|
||||
expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: createFakeNamespace("something", "1", map[string]string{}, map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value"}),
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{},
|
||||
Annotations: map[string]string{"my-cool-annotation": "some-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace exists with annotations and labels managedNamespaceMetadata has mismatching annotations and labels",
|
||||
expected: true,
|
||||
expectedLabels: map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"},
|
||||
expectedAnnotations: map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value", "argocd.argoproj.io/sync-options": "ServerSideApply=true"},
|
||||
managedNs: createFakeNamespace("", "", map[string]string{}, map[string]string{}),
|
||||
liveNs: createFakeNamespace("something", "1", map[string]string{"my-cool-label": "some-value"}, map[string]string{"my-cool-annotation": "some-value"}),
|
||||
syncPolicy: &v1alpha1.SyncPolicy{
|
||||
ManagedNamespaceMetadata: &v1alpha1.ManagedNamespaceMetadata{
|
||||
Labels: map[string]string{"my-cool-label": "some-value", "my-other-label": "some-other-value"},
|
||||
Annotations: map[string]string{"my-cool-annotation": "some-value", "my-other-annotation": "some-other-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
actual, err := syncNamespace(argo.NewResourceTracking(), common.LabelKeyAppInstance, argo.TrackingMethodAnnotation, "some-app", tt.syncPolicy)(tt.managedNs, tt.liveNs)
|
||||
assert.NoError(t, err)
|
||||
|
||||
if tt.managedNs != nil {
|
||||
assert.Equal(t, tt.expectedLabels, tt.managedNs.GetLabels())
|
||||
assert.Equal(t, tt.expectedAnnotations, tt.managedNs.GetAnnotations())
|
||||
}
|
||||
|
||||
assert.Equalf(t, tt.expected, actual, "syncNamespace(%v)", tt.syncPolicy)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -50,12 +50,12 @@ func TestPersistRevisionHistory(t *testing.T) {
|
||||
}}
|
||||
ctrl.appStateManager.SyncAppState(app, opState)
|
||||
// Ensure we record spec.source into sync result
|
||||
assert.Equal(t, app.Spec.Source, opState.SyncResult.Source)
|
||||
assert.Equal(t, app.Spec.GetSource(), opState.SyncResult.Source)
|
||||
|
||||
updatedApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, v1.GetOptions{})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1, len(updatedApp.Status.History))
|
||||
assert.Equal(t, app.Spec.Source, updatedApp.Status.History[0].Source)
|
||||
assert.Equal(t, app.Spec.GetSource(), updatedApp.Status.History[0].Source)
|
||||
assert.Equal(t, "abc123", updatedApp.Status.History[0].Revision)
|
||||
}
|
||||
|
||||
|
||||
docs/assets/argocd-arch-authn-authz.jpg (new binary file, not shown. After: 270 KiB)
docs/assets/repo-add-google-cloud-source.png (new binary file, not shown. After: 107 KiB)
Modified binary image (not shown. Before: 60 KiB, After: 55 KiB)
@@ -39,6 +39,31 @@ rm argocd-linux-amd64
|
||||
|
||||
You should now be able to run `argocd` commands.
|
||||
|
||||
|
||||
## Mac (M1)
|
||||
|
||||
### Download With Curl
|
||||
|
||||
You can view the latest version of Argo CD at the link above or run the following command to grab the version:
|
||||
|
||||
```bash
|
||||
VERSION=$(curl --silent "https://api.github.com/repos/argoproj/argo-cd/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/')
|
||||
```
|
||||
|
||||
Replace `VERSION` in the command below with the version of Argo CD you would like to download:
|
||||
|
||||
```bash
|
||||
curl -sSL -o argocd-darwin-arm64 https://github.com/argoproj/argo-cd/releases/download/$VERSION/argocd-darwin-arm64
|
||||
```
|
||||
|
||||
Install the Argo CD CLI binary:
|
||||
|
||||
```bash
|
||||
sudo install -m 555 argocd-darwin-arm64 /usr/local/bin/argocd
|
||||
rm argocd-darwin-arm64
|
||||
```
|
||||
|
||||
|
||||
## Mac
|
||||
|
||||
### Homebrew
|
||||
|
||||
docs/developer-guide/architecture/authz-authn.md (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
# Authentication and Authorization
|
||||
|
||||
This document describes how authentication (authn) and authorization
|
||||
(authz) are implemented in Argo CD. There is a clear distinction in
|
||||
the code base of when and how these two security concepts are
|
||||
enforced.
|
||||
|
||||
## Logical layers
|
||||
|
||||
The diagram below shows 4 different logical layers (represented by
|
||||
4 boxes: HTTP, gRPC, AuthN and AuthZ) inside Argo CD API server that
|
||||
collaborate to provide authentication and authorization.
|
||||
|
||||
- **HTTP**: The HTTP layer groups the *logical elements* that
|
||||
collaborate to handle HTTP requests. Every incoming request reaches
|
||||
the same HTTP server at the same port (8080). This server will
|
||||
analyze the request headers and dispatch to the proper internal
|
||||
server: gRPC or standard HTTP.
|
||||
|
||||
- **gRPC**: The [gRPC][4] layer groups the logical elements responsible for
|
||||
the gRPC implementation.
|
||||
|
||||
- **AuthN**: The AuthN represents the layer responsible for
|
||||
authentication.
|
||||
|
||||
- **AuthZ**: The AuthZ represents the layer responsible for
|
||||
authorization.
|
||||
|
||||

|
||||
|
||||
## Logical elements
|
||||
|
||||
The logical elements (identified by numbers) can represent an object,
|
||||
a function or a component in the code base. Note that this particular
|
||||
distinction is not represented in the diagram.
|
||||
|
||||
Incoming requests can reach Argo CD API server from the web UI as well
|
||||
as from the `argocd` CLI. The responsibilities of the represented
|
||||
elements are described below with their respective numbers:
|
||||
|
||||
1. **Cmux**: Uses the [cmux][1] library to provide a connection
|
||||
multiplexer capability making it possible to use the same port to
|
||||
handle standard HTTP as well as gRPC requests. It is responsible
|
||||
for inspecting incoming requests and dispatch to appropriate
|
||||
internal servers. If the request version is `http1.x` it will
|
||||
delegate to the *http mux*. If the request version is `http2` and
|
||||
has the header `content-type: application/grpc`, it will delegate
|
||||
to the *gRPC Server* (see the sketch after this list).
|
||||
|
||||
1. **HTTP mux**: A [standard HTTP multiplexer][8] that will handle non
|
||||
gRPC requests. It is responsible for serving a unified [REST
|
||||
API][3] to the web UI exposing all gRPC and non-gRPC services.
|
||||
|
||||
1. **gRPC-gateway**: Uses the [grpc-gateway][2] library to translate
|
||||
internal gRPC services and expose them as a [REST API][3]. The
|
||||
great majority of API services in Argo CD are implemented in gRPC.
|
||||
The grpc-gateway makes it possible to access gRPC services from the
|
||||
web UI.
|
||||
|
||||
1. **Server**: The internal gRPC Server responsible for handling gRPC
|
||||
requests.
|
||||
|
||||
1. **AuthN**: Is responsible for invoking the authentication logic. It
|
||||
is registered as a gRPC interceptor which will automatically
|
||||
trigger for every gRPC request.
|
||||
|
||||
1. **Session Manager**: Is the object responsible for managing Argo CD
|
||||
API server session. It provides the functionality to verify the
|
||||
validity of the authentication token provided in the request.
|
||||
Depending on how Argo CD is configured it may or may not delegate
|
||||
to an external AuthN provider to verify the token.
|
||||
|
||||
1. **AuthN Provider**: Describes the component that can be plugged into the
|
||||
Argo CD API server to provide the authentication functionality such
|
||||
as the login and the token verification process.
|
||||
|
||||
1. **Service Method**: Represents the method implementing the business
|
||||
logic (core functionality) requested. An example of business logic
|
||||
is: `List Applications`. Service methods are also responsible for
|
||||
invoking the [RBAC][7] enforcement function to validate if the
|
||||
authenticated user has permission to execute this method.
|
||||
|
||||
1. **RBAC**: Is a collection of functions to provide the capability to
|
||||
verify if the user has permission to execute a specific action in
|
||||
Argo CD. It does so by validating the incoming request action
|
||||
against predefined [RBAC][7] rules that can be configured in Argo CD
|
||||
API server as well as in Argo CD `Project` CRD.
|
||||
|
||||
1. **Casbin**: Uses the [Casbin][5] library to enforce [RBAC][7] rules.
|
||||
|
||||
1. **AuthN Middleware**: Is an [HTTP Middleware][6] configured to
|
||||
invoke the logic to verify the token for HTTP services that are not
|
||||
implemented as gRPC and require authentication.
|
||||
|
||||
1. **HTTP Handler**: Represents the HTTP handlers responsible for
|
||||
invoking the business logic (core functionality) requested. An
|
||||
example of business logic is: `List Applications`. HTTP handlers
|
||||
are also responsible for invoking the [RBAC][7] enforcement function to
|
||||
validate if the authenticated user has permission to execute this
|
||||
business logic.
|
||||
|
||||
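The dispatch described in item 1 can be pictured with a minimal sketch built on the [cmux][1] library (an illustration of the multiplexing idea, not the actual Argo CD server wiring; `grpcServer` and `httpServer` are assumed to be constructed elsewhere):

```go
// Sketch: route HTTP/2 gRPC traffic and plain HTTP/1.x traffic arriving on one port.
lis, err := net.Listen("tcp", ":8080")
if err != nil {
	log.Fatal(err)
}
m := cmux.New(lis)

// HTTP/2 requests with "content-type: application/grpc" go to the gRPC server.
grpcL := m.MatchWithWriters(
	cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
// Everything else (http1.x) goes to the HTTP mux / grpc-gateway.
httpL := m.Match(cmux.HTTP1Fast())

go func() { _ = grpcServer.Serve(grpcL) }()
go func() { _ = httpServer.Serve(httpL) }()
_ = m.Serve()
```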
[1]: https://github.com/soheilhy/cmux
|
||||
[2]: https://github.com/grpc-ecosystem/grpc-gateway
|
||||
[3]: https://en.wikipedia.org/wiki/Representational_state_transfer
|
||||
[4]: https://grpc.io/
|
||||
[5]: https://casbin.org/
|
||||
[6]: https://github.com/golang/go/wiki/LearnServerProgramming#middleware
|
||||
[7]: https://en.wikipedia.org/wiki/Role-based_access_control
|
||||
[8]: https://pkg.go.dev/net/http#ServeMux
|
||||
@@ -1,15 +0,0 @@
|
||||
Money given to the Argo CD project as part of the Internet Bug Bounty program is used in three ways:
|
||||
|
||||
1. To reward CVE patch contributors
|
||||
2. To offer bounties on security enhancements (as announced by label/comment on Issues)
|
||||
3. To sponsor security-relevant dependencies
|
||||
|
||||
If someone’s primary full-time job responsibility is to work on Argo CD, then their eligibility to receive this money is limited. (Determining this is up to the maintainers’ discretion. Someone who contributes an average of three commits per week during work hours probably meets the definition. A first-time contributor who uses Argo CD daily as an SRE does not.)
|
||||
|
||||
A full-time Argo CD author is not eligible to receive rewards for CVE patch contributions. This avoids any risk of the appearance that a full-time Argo CD author is incentivized to introduce CVEs.
|
||||
|
||||
A full-time Argo CD author is eligible to receive bounties for security enhancements if and only if the vast majority of the work is done in their free time (non-work hours). Busy work like resolving merge conflicts during work hours is acceptable (to avoid over-burdening the process).
|
||||
|
||||
An Argo CD dependency is eligible to receive donations if it is listed in the Argo CD SBOM or if it is a binary invoked by Argo CD (like Helm). The dependency is not eligible for donations if a full-time Argo CD author is the primary author of the dependency.
|
||||
|
||||
Offers and transfers of rewards, bounties, and donations will be made from time to time by the Argo CD maintainers, based on the current project needs and the amount of money available from IBB. The process should be lightweight and consensus-based for now. If necessary, a more structured system can be established based on experience gained from early rewards/bounties/donations
|
||||
@@ -65,12 +65,12 @@ make builder-image IMAGE_NAMESPACE=argoproj IMAGE_TAG=v1.0.0
|
||||
|
||||
## Public CD
|
||||
|
||||
Every commit to master is built and published to `ghcr.io/argoproj/argocd:<version>-<short-sha>`. The list of images is available at
|
||||
Every commit to master is built and published to `ghcr.io/argoproj/argo-cd/argocd:<version>-<short-sha>`. The list of images is available at
|
||||
https://github.com/argoproj/argo-cd/packages.
|
||||
|
||||
!!! note
|
||||
GitHub docker registry [requires](https://github.community/t5/GitHub-Actions/docker-pull-from-public-GitHub-Package-Registry-fail-with-quot/m-p/32888#M1294) authentication to read
|
||||
even publicly available packages. Follow the steps from Kubernetes [documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry)
|
||||
to configure image pull secret if you want to use `ghcr.io/argoproj/argocd` image.
|
||||
to configure image pull secret if you want to use `ghcr.io/argoproj/argo-cd/argocd` image.
|
||||
|
||||
The image is automatically deployed to the dev Argo CD instance: [https://cd.apps.argoproj.io/](https://cd.apps.argoproj.io/)
|
||||
|
||||
@@ -103,7 +103,7 @@ Design documents are usually submitted as PR and use [this template](https://git
|
||||
|
||||
Our community regularly meets virtually to discuss issues, ideas and enhancements around Argo CD. We invite you to join these virtual meetings if you want to bring up certain things (including your enhancement proposals), participate in our triaging, or just want to get to know other contributors.
|
||||
|
||||
The current cadence of our meetings is weekly, every Thursday at 4pm UTC (9am Pacific, 12pm Eastern, 6pm Central European, 9:30pm Indian). We use Zoom to conduct these meetings.
|
||||
The current cadence of our meetings is weekly, every Thursday at 4:15pm UTC (8:15am Pacific, 11:15am Eastern, 5:15pm Central European, 9:45pm Indian). We use Zoom to conduct these meetings.
|
||||
|
||||
* [Agenda document (Google Docs, includes Zoom link)](https://docs.google.com/document/d/1xkoFkVviB70YBzSEa4bDnu-rUZ1sIFtwKKG1Uw8XsY8)
|
||||
|
||||
|
||||
docs/developer-guide/contributors-quickstart.md (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
# Contributors Quick-Start
|
||||
|
||||
This guide is a starting point for first-time contributors running Argo CD locally for the first time.
|
||||
|
||||
It skips advanced topics such as codegen, which are covered in the [running locally guide](running-locally.md)
|
||||
and the [toolchain guide](toolchain-guide.md).
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Install Go
|
||||
|
||||
- Install version 1.18 or newer (Verify version by running `go version`)
|
||||
|
||||
- Get current value of `GOPATH` env:
|
||||
```shell
|
||||
go env GOPATH
|
||||
```
|
||||
- Change directory into that path
|
||||
```shell
|
||||
cd <path>
|
||||
```
|
||||
|
||||
### Clone the Argo CD repo
|
||||
|
||||
```shell
|
||||
mkdir -p src/github.com/argoproj/ &&
|
||||
cd src/github.com/argoproj &&
|
||||
git clone https://github.com/argoproj/argo-cd.git
|
||||
```
|
||||
|
||||
### Install Docker
|
||||
|
||||
<https://docs.docker.com/engine/install/>
|
||||
|
||||
### Install or Upgrade `kind` (Optional - Should work with any local cluster)
|
||||
|
||||
<https://kind.sigs.k8s.io/docs/user/quick-start/>
|
||||
|
||||
### Start Your Local Cluster
|
||||
|
||||
```shell
|
||||
kind create cluster
|
||||
```
|
||||
|
||||
### Install Argo CD
|
||||
|
||||
```shell
|
||||
kubectl create namespace argocd &&
|
||||
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/master/manifests/install.yaml
|
||||
```
|
||||
|
||||
Set kubectl config to avoid specifying the namespace in every kubectl command.
|
||||
All following commands in this guide assume the namespace is already set.
|
||||
|
||||
```shell
|
||||
kubectl config set-context --current --namespace=argocd
|
||||
```
|
||||
|
||||
### Install `yarn`
|
||||
|
||||
<https://classic.yarnpkg.com/lang/en/docs/install/>
|
||||
|
||||
### Install `goreman`
|
||||
|
||||
<https://github.com/mattn/goreman#getting-started>
|
||||
|
||||
### Run Argo CD
|
||||
|
||||
```shell
|
||||
cd argo-cd
|
||||
make start-local ARGOCD_GPG_ENABLED=false
|
||||
```
|
||||
|
||||
- Navigate to <localhost:4000> to view the Argo CD UI in your browser
|
||||
- It may take a few minutes for the UI to be responsive
|
||||
|
||||
!!! note
|
||||
If the UI is not working, check the logs from `make start-local`. The logs are `DEBUG` level by default. If the logs are
|
||||
too noisy to find the problem, try editing log levels for the commands in the `Procfile` in the root of the Argo CD repo.
|
||||
|
||||
## Making Changes
|
||||
|
||||
### UI Changes
|
||||
|
||||
Modifying the User-Interface (by editing .tsx or .scss files) auto-reloads the changes on port 4000.
|
||||
|
||||
### Backend Changes
|
||||
|
||||
Modifying the API server, repo server, or a controller requires restarting the current `make start-local` session to reflect the changes.
|
||||
|
||||
### CLI Changes
|
||||
|
||||
Modifying the CLI requires restarting the current `make start-local` session to reflect the changes.
|
||||
|
||||
To test most CLI commands, you will need to log in.
|
||||
|
||||
First, get the auto-generated secret:
|
||||
|
||||
```shell
|
||||
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo
|
||||
```
|
||||
|
||||
Then log in using that password and username `admin`:
|
||||
|
||||
```shell
|
||||
dist/argocd login localhost:8080
|
||||
```
|
||||
|
||||
---
|
||||
Congrats on making it to the end of this runbook! 🚀
|
||||
|
||||
For more on Argo CD, find us in Slack - <https://slack.cncf.io/> [#argo-contributors](https://cloud-native.slack.com/archives/C020XM04CUW)
|
||||
@@ -20,13 +20,15 @@ curl -sSfL https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/i
|
||||
## Connect
|
||||
Connect to one of the services. For example, to debug the main Argo CD server, run:
|
||||
```shell
|
||||
kubectl config set-context --current --namespace argocd
|
||||
telepresence helm install # Installs telepresence into your cluster
|
||||
telepresence connect # Starts the connection to your cluster
|
||||
telepresence intercept argocd-server --port 8083:8083 --port 8080:8080 --env-file .envrc.remote --namespace argocd # Starts the interception
|
||||
telepresence connect # Starts the connection to your cluster (bound to the current namespace)
|
||||
telepresence intercept argocd-server --port 8080:http --env-file .envrc.remote # Starts the interception
|
||||
```
|
||||
* `--port` forwards traffic of remote ports 8080 and 8083 to the same ports locally
|
||||
* `--port` forwards traffic of remote port http to 8080 locally (use `--port 8080:https` if argocd-server terminates TLS)
|
||||
* `--env-file` writes all the environment variables of the remote pod into a local file, the variables are also set on the subprocess of the `--run` command
|
||||
* `--namespace` specifies that the `argocd-server` is located in the `argocd` namespace
|
||||
|
||||
With this, any traffic that hits your argocd-server service in the cluster (e.g. through a LB / ingress) will be forwarded to your laptop on port 8080, so you can now start argocd-server locally to debug or test new code. If you launch argocd-server using the environment variables in `.envrc.remote`, it is able to fetch all the configmaps, secrets and so on from the cluster and transparently connect to the other microservices, so no further configuration should be necessary and it behaves exactly the same as in the cluster.

List current status of Telepresence using:

```shell
telepresence status
```

Once a connection is established, use your favorite tools to start the server locally:

* Run `./dist/argocd-server`

### VSCode

In VSCode use the integrated terminal to run the Telepresence command to connect. Then use the following launch configuration to run argocd-server; update the configuration to point to your kubeconfig file (`KUBECONFIG=` is required):

```json
{
    "name": "Launch argocd-server",
    "type": "go",
    "request": "launch",
    "mode": "auto",
    ...
}
```

# Release Process And Cadence

## Release Cycle

### Schedule

These are the upcoming release dates:

| Release | Release Planning Meeting | Release Candidate 1 | General Availability | Release Champion | Checklist |
|---------|--------------------------|---------------------|----------------------|------------------|-----------|
| v2.6 | Monday, Dec. 12, 2022 | Monday, Dec. 19, 2022 | Monday, Feb. 6, 2023 | [William Tam](https://github.com/wtam2018) | [checklist](https://github.com/argoproj/argo-cd/issues/11563) |
| v2.7 | Monday, Mar. 6, 2023 | Monday, Mar. 20, 2023 | Monday, May 1, 2023 | [Pavel Kostohrys](https://github.com/pasha-codefresh) | |
| v2.8 | Monday, Jun. 5, 2023 | Monday, Jun. 19, 2023 | Monday, Aug. 7, 2023 | [Keith Chong](https://github.com/keithchong) | |
| v2.9 | Monday, Sep. 4, 2023 | Monday, Sep. 18, 2023 | Monday, Nov. 6, 2023 | | |

Actual release dates might differ from the plan by a few days.

### Release Process

#### Minor Releases (e.g. 2.x.0)

A minor Argo CD release occurs four times a year, once every three months. Each General Availability (GA) release is preceded by several Release Candidates (RCs). The first RC is released three weeks before the scheduled GA date. This effectively means that there is a three-week feature freeze.

These are the approximate release dates:

* The first Monday of February
* The first Monday of May
* The first Monday of August
* The first Monday of November

Dates may be shifted slightly to accommodate holidays. Those shifts should be minimal.

#### Patch Releases (e.g. 2.5.x)

Argo CD patch releases occur on an as-needed basis. Only the three most recent minor versions are eligible for patch releases. Versions older than the three most recent minor versions are considered EOL and will not receive bug fixes or security updates.

#### Minor Release Planning Meeting

Roughly two weeks before the RC date, there will be a meeting to discuss which features are planned for the RC. This meeting is for contributors to advocate for certain features. Features which have at least one Approver (besides the contributor) who can assure they will review/merge by the RC date will be included in the release milestone. All other features will be dropped from the milestone (and potentially shifted to the next one).

Since not everyone will be able to attend the meeting, there will be a meeting doc. Contributors can add their feature to a table, and Approvers can add their name to the table. Features with a corresponding Approver will remain in the release milestone.

#### Release Champion

To help manage all the steps involved in a release, we will have a Release Champion. The Release Champion will be responsible for a checklist of items for their release. The checklist is an issue template in the Argo CD repository.

The Release Champion can be anyone in the Argo CD community. Some tasks (like cherry-picking bug fixes and cutting releases) require [Approver](https://github.com/argoproj/argoproj/blob/master/community/membership.md#community-membership) membership. The Release Champion can delegate tasks when necessary and will be responsible for coordinating with the Approver.

### Feature Acceptance Criteria

To be eligible for inclusion in a minor release, a new feature must meet the following criteria before the release's RC date.

If it is a large feature that involves significant design decisions, that feature must be described in a Proposal, and that Proposal must be reviewed and merged.

The feature PR must include:

* Tests (passing)
* Documentation
* If necessary, a note in the Upgrading docs for the planned minor release
* The PR must be reviewed, approved, and merged by an Approver.

If these criteria are not met by the RC date, the feature will be ineligible for inclusion in the RC series or GA for that minor release. It will have to wait for the next minor release.

## Release Planning

We are using GitHub milestones to perform release planning and tracking. Each release milestone includes two types of issues:

* Issues that maintainers committed to working on. Maintainers decide which features they are committing to work on during the next release based on their availability. Typically, issues are added offline by each maintainer and finalized during the contributors' meeting. Each such issue should be assigned to the maintainer who plans to implement and test it.
* Nice-to-have improvements contributed by community contributors. Nice-to-have issues are typically non-critical, smallish enhancements that could be contributed by community contributors. Maintainers are not committing to implement them, but are committing to review PRs from the community.

The milestone should have a clear description of the most important features as well as the expected end date. This should provide clarity to end-users about what to expect from the next release and when.

In addition to the next milestone, we need to maintain a draft of the upcoming release milestone.

## Community Contributions

We receive a lot of contributions from our awesome community, and we're very grateful for that. However, reviewing and testing PRs is a lot of (unplanned) work, and therefore we cannot guarantee that contributions (especially large or complex ones) made by the community receive a timely review within a release's time frame. Maintainers may decide on their own to work on a PR together with the contributor; in this case, the maintainer will self-assign the PR, thereby committing to review, eventually merge, and later test it within the release scope.

## Release Testing

We need to make sure that each change, both from maintainers and community contributors, is tested well and has someone who is going to fix last-minute bugs. To ensure this, each pull request must have an assigned maintainer before it gets merged. The assigned maintainer will work on testing the introduced changes and fixing any introduced bugs.

We have a code freeze period from two weeks before the release until the release branch is created. During code freeze no feature PR should be merged; it is OK to merge bug fixes.

Maintainers assigned to a merged PR should drive testing and work on fixing last-minute issues. For tracking purposes, after verifying the PR the assigned maintainer should label it with a `verified` label.

## Releasing

The releasing procedure is described in the [releasing](./releasing.md) document. Before closing the release milestone, the following should be verified:

- [ ] All merged PRs are verified (verify and remove the `needs-verification` label).
- [ ] Triage issues reported by `yarn audit` and ensure there are no exploitable security issues.
- [ ] The roadmap is updated based on the current release changes.
- [ ] The next release milestone is created.
- [ ] The upcoming release milestone is updated.

The following steps are required no matter whether you chose to use a virtualized or a local toolchain.

!!!note "Docker privileges"
    If you opt in to use the virtualized toolchain, you will need to have the
    appropriate privileges to interact with the Docker daemon. It is not
    recommended to work as the root user, and if your user does not have the
    permissions to talk to the Docker daemon, but you have `sudo` set up on your
    system, you can set the environment variable `SUDO` to `sudo` in order to
    have the build scripts make any calls to the `docker` CLI using sudo,
    without affecting the other parts of the build scripts (which should be
    executed with your normal user privileges).

You can either set this before calling `make`, for example:

```
SUDO=sudo make sometarget
```

Or you can opt to export this permanently to your environment, for example:

```
export SUDO=sudo
```

### Clone the Argo CD repository from your personal fork on GitHub

* `mkdir -p ~/go/src/github.com/argoproj`
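
A sketch of how this step typically continues, assuming your fork lives at `github.com/<your-username>/argo-cd` (the exact steps in your setup may differ):

```shell
cd ~/go/src/github.com/argoproj
# <your-username> is a placeholder for your GitHub user
git clone https://github.com/<your-username>/argo-cd.git
cd argo-cd
```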

The API server can then be accessed using https://localhost:8080.

The initial password for the `admin` account is auto-generated and stored as clear text in the field `password` in a secret named `argocd-initial-admin-secret` in your Argo CD installation namespace. You can simply retrieve this password using the `argocd` CLI:

```bash
argocd admin initial-password
```

# Applications in any namespace

**Current feature state**: Beta

!!! warning
    Please read this documentation carefully before you enable this feature. Misconfiguration could lead to potential security issues.

## Introduction

As of version 2.5, Argo CD supports managing `Application` resources in namespaces other than the control plane's namespace (which is usually `argocd`), but this feature has to be explicitly enabled and configured appropriately.

Argo CD administrators can define a certain set of namespaces where `Application` resources may be created, updated and reconciled in. However, applications in these additional namespaces will only be allowed to use certain `AppProjects`, as configured by the Argo CD administrators. This allows ordinary Argo CD users (e.g. application teams) to use patterns like declarative management of `Application` resources, implementing app-of-apps and others, without the risk of a privilege escalation through usage of other `AppProjects` that would exceed the permissions granted to the application teams.

Some manual steps will need to be performed by the Argo CD administrator in order to enable this feature.

!!! note
    This feature is considered beta as of now. Some of the implementation details may change over the course of time until it is promoted to a stable status. We will be happy if early adopters use this feature and provide us with bug reports and feedback.

## Prerequisites

### Cluster-scoped Argo CD installation

This feature can only be enabled and used when your Argo CD is installed as a cluster-wide instance, so it has permissions to list and manipulate resources on a cluster scope. It will *not* work with an Argo CD installed in namespace-scoped mode.

### Switch resource tracking method

Also, while technically not necessary, it is strongly suggested that you switch the application tracking method from the default `label` setting to either `annotation` or `annotation+label`. The reasoning for this is that application names will be a composite of the namespace's name and the name of the `Application`, and this can easily exceed the 63 character length limit imposed on label values. Annotations have a notably greater length limit.

To enable annotation based resource tracking, refer to the documentation about [resource tracking methods](../../user-guide/resource_tracking/).
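
As a sketch of what that typically looks like, the tracking method is set via the `application.resourceTrackingMethod` key in the `argocd-cm` ConfigMap (verify against the linked documentation for your version):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
  labels:
    app.kubernetes.io/part-of: argocd
data:
  # Switch from the default "label" tracking to annotation-based tracking
  application.resourceTrackingMethod: annotation
```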

## Implementation details

### Overview

In order for an application to be managed and reconciled outside the Argo CD's control plane namespace, two prerequisites must be met:

1. The `Application`'s namespace must be explicitly enabled using the `--application-namespaces` parameter for the `argocd-application-controller` and `argocd-server` workloads. This parameter controls the list of namespaces that Argo CD will be allowed to source `Application` resources from globally. Any namespace not configured here cannot be used from any `AppProject`.
1. The `AppProject` referenced by the `.spec.project` field of the `Application` must have the namespace listed in its `.spec.sourceNamespaces` field. This setting will determine whether an `Application` may use a certain `AppProject`. If an `Application` specifies an `AppProject` that is not allowed, Argo CD refuses to process this `Application`. As stated above, any namespace configured in the `.spec.sourceNamespaces` field must also be enabled globally.

`Applications` in different namespaces can be created and managed just like any other `Application` in the `argocd` namespace, either declaratively or through the Argo CD API (e.g. using the CLI, the web UI or the REST API).

### Reconfigure Argo CD to allow certain namespaces

#### Change workload startup parameters

In order to enable this feature, the Argo CD administrator must reconfigure the `argocd-server` and `argocd-application-controller` workloads to add the `--application-namespaces` parameter to the container's startup command.

The `--application-namespaces` parameter takes a comma-separated list of namespaces where `Applications` are to be allowed in. Each entry of the list supports shell-style wildcards such as `*`, so for example the entry `app-team-*` would match `app-team-one` and `app-team-two`. To enable all namespaces on the cluster where Argo CD is running, you can just specify `*`, i.e. `--application-namespaces=*`.
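
For illustration, adding the parameter directly to the `argocd-server` container's startup command might look roughly like this (a hypothetical excerpt; the actual manifest in your installation may differ):

```yaml
# Excerpt of a Deployment pod template for argocd-server (sketch)
containers:
  - name: argocd-server
    command:
      - argocd-server
      - --application-namespaces=app-team-*
```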

The startup parameters for both the `argocd-server` and the `argocd-application-controller` can also be conveniently set up and kept in sync by specifying the `application.namespaces` setting in the `argocd-cmd-params-cm` ConfigMap _instead_ of changing the manifests for the respective workloads. For example:

```yaml
data:
  application.namespaces: app-team-one, app-team-two
```

would allow the `app-team-one` and `app-team-two` namespaces for managing `Application` resources. After a change to the `argocd-cmd-params-cm` ConfigMap, the appropriate workloads need to be restarted:

```bash
kubectl rollout restart -n argocd deployment argocd-server
kubectl rollout restart -n argocd statefulset argocd-application-controller
```

#### Adapt Kubernetes RBAC

We decided to not extend the Kubernetes RBAC for the `argocd-server` workload by default for the time being. If you want `Applications` in other namespaces to be managed by the Argo CD API (i.e. the CLI and UI), you need to extend the Kubernetes permissions for the `argocd-server` ServiceAccount.

We supply a `ClusterRole` and `ClusterRoleBinding` suitable for this purpose in the `examples/k8s-rbac/argocd-server-applications` directory. For a default Argo CD installation (i.e. installed to the `argocd` namespace), you can just apply them as-is:

```shell
kubectl apply -f examples/k8s-rbac/argocd-server-applications/
```

!!! note
    At some later point in time, we may make this cluster role part of the default installation manifests.

### Allowing additional namespaces in an AppProject

Any user with Kubernetes access to the Argo CD control plane's namespace (`argocd`), especially those with permissions to create or update `Applications` in a declarative way, is to be considered an Argo CD admin.

This prevented unprivileged Argo CD users from declaratively creating or managing `Applications` in the past. Those users were constrained to using the API instead, subject to Argo CD RBAC, which ensures that only `Applications` in allowed `AppProjects` are created.

For an `Application` to be created outside the `argocd` namespace, the `AppProject` referred to in the `Application`'s `.spec.project` field must include the `Application`'s namespace in its `.spec.sourceNamespaces` field.

For example, consider the two following (incomplete) `AppProject` specs:

```yaml
kind: AppProject
apiVersion: argoproj.io/v1alpha1
metadata:
  name: project-one
  namespace: argocd
spec:
  sourceNamespaces:
  - namespace-one
```

and

```yaml
kind: AppProject
apiVersion: argoproj.io/v1alpha1
metadata:
  name: project-two
  namespace: argocd
spec:
  sourceNamespaces:
  - namespace-two
```

In order for an Application to set `.spec.project` to `project-one`, it would have to be created in either namespace `namespace-one` or `argocd`. Likewise, in order for an Application to set `.spec.project` to `project-two`, it would have to be created in either namespace `namespace-two` or `argocd`.

If an Application in `namespace-two` were to set its `.spec.project` to `project-one`, or an Application in `namespace-one` were to set its `.spec.project` to `project-two`, Argo CD would consider this a permission violation and refuse to reconcile the Application.

Also, the Argo CD API will enforce these constraints, regardless of the Argo CD RBAC permissions.

The `.spec.sourceNamespaces` field of the `AppProject` is a list that can contain an arbitrary number of namespaces, and each entry supports shell-style wildcards, so that you can allow namespaces with patterns like `team-one-*`.
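
For example, an `AppProject` that admits all `team-one-*` namespaces could look like this (an illustrative, incomplete spec with made-up names):

```yaml
kind: AppProject
apiVersion: argoproj.io/v1alpha1
metadata:
  name: team-one
  namespace: argocd
spec:
  sourceNamespaces:
  - team-one-*
```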

!!! warning
    Do not add user controlled namespaces in the `.spec.sourceNamespaces` field of any privileged AppProject like the `default` project. Always make sure that the AppProject follows the principle of granting least required privileges. Never grant access to the `argocd` namespace within the AppProject.

!!! note
    For backwards compatibility, Applications in the Argo CD control plane's namespace (`argocd`) are allowed to set their `.spec.project` field to reference any AppProject, regardless of the restrictions placed by the AppProject's `.spec.sourceNamespaces` field.

### Application names

For the CLI and UI, applications are now referred to and displayed in the format `<namespace>/<name>`.

For backwards compatibility, if the namespace of the Application is the control plane's namespace (i.e. `argocd`), the `<namespace>` can be omitted from the application name when referring to it. For example, the application names `argocd/someapp` and `someapp` are semantically the same and refer to the same application in the CLI and the UI.
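
For example, both of the following commands address the same `Application` living in the control plane's namespace:

```shell
argocd app get argocd/someapp
argocd app get someapp
```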

### Application RBAC

The RBAC syntax for Application objects has been changed from `<project>/<application>` to `<project>/<namespace>/<application>` to accommodate the need to restrict access based on the source namespace of the Application to be managed.

For backwards compatibility, Applications in the `argocd` namespace can still be referred to as `<project>/<application>` in the RBAC policy rules.

Wildcards do not make any distinction between project and application namespaces yet. For example, the following RBAC rule would match any application belonging to project `foo`, regardless of the namespace it is created in:

```
p, somerole, applications, get, foo/*, allow
```

If you want to restrict access to be granted only to `Applications` in project `foo` within namespace `bar`, the rule would need to be adapted as follows:

```
p, somerole, applications, get, foo/bar/*, allow
```

## Managing applications in other namespaces

### Declaratively

For declarative management of Applications, just create the Application from a YAML or JSON manifest in the desired namespace. Make sure that the `.spec.project` field refers to an AppProject that allows this namespace. For example, the following (incomplete) Application manifest creates an Application in the namespace `some-namespace`:

```yaml
kind: Application
apiVersion: argoproj.io/v1alpha1
metadata:
  name: some-app
  namespace: some-namespace
spec:
  project: some-project
  # ...
```

The project `some-project` will then need to specify `some-namespace` in the list of allowed source namespaces, e.g.

```yaml
kind: AppProject
apiVersion: argoproj.io/v1alpha1
metadata:
  name: some-project
  namespace: argocd
spec:
  sourceNamespaces:
  - some-namespace
```

### Using the CLI

You can use all existing Argo CD CLI commands for managing applications in other namespaces, exactly as you would use the CLI to manage applications in the control plane's namespace.

For example, to retrieve the `Application` named `bar` in the namespace `foo`, you can use the following CLI command:

```shell
argocd app get foo/bar
```

Likewise, to manage this application, keep referring to it as `foo/bar`:

```bash
# Create an application
argocd app create foo/bar ...
# Sync the application
argocd app sync foo/bar
# Delete the application
argocd app delete foo/bar
# Retrieve application's manifest
argocd app manifests foo/bar
```

As stated previously, for applications in the Argo CD's control plane namespace, you can omit the namespace from the application name.

### Using the UI

Similar to the CLI, you can refer to the application in the UI as `foo/bar`.

For example, to create an application named `bar` in the namespace `foo` in the web UI, set the application name in the creation dialogue's _Application Name_ field to `foo/bar`. If the namespace is omitted, the control plane's namespace will be used.

### Using the REST API

If you are using the REST API, the namespace of the `Application` cannot be specified as part of the application name; it needs to be specified using the optional `appNamespace` query parameter. For example, to work with the `Application` resource named `foo` in the namespace `bar`, the request would look as follows:

```bash
GET /api/v1/applications/foo?appNamespace=bar
```

For other operations such as `POST` and `PUT`, the `appNamespace` parameter must be part of the request's payload.

For `Application` resources in the control plane namespace, this parameter can be omitted.
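
For illustration, such a request could be issued with `curl` like this (a sketch; the server address is a placeholder and `$ARGOCD_TOKEN` is assumed to hold a valid API token):

```shell
curl -s \
  -H "Authorization: Bearer $ARGOCD_TOKEN" \
  "https://argocd.example.com/api/v1/applications/foo?appNamespace=bar"
```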

The following is a reconstruction of excerpts from the example `Application` manifest reference; the enclosing keys (`spec`, `source`, `helm`, `syncPolicy`) are added here only for context, and elided portions are marked with `# ...`:

```yaml
spec:
  source:
    helm:
      valueFiles:
      - values-prod.yaml

      # Ignore locally missing valueFiles when installing Helm chart. Defaults to false
      ignoreMissingValueFiles: false

      # Values file as block file
      values: |
        ingress:
          # ...
          hosts:
          - mydomain.example.com

      # Skip custom resource definition installation if chart contains custom resource definitions. Defaults to false
      skipCrds: false

      # Optional Helm version to template with. If omitted it will fall back to look at the 'apiVersion' in Chart.yaml
      # and decide which Helm binary to use automatically. This field can be either 'v2' or 'v3'.
      version: v2

    # ...

    # plugin specific config
    plugin:
      # NOTE: this field is deprecated in v2.5 and must be removed to use sidecar-based plugins.
      # Only set the plugin name if the plugin is defined in argocd-cm.
      # If the plugin is defined as a sidecar, omit the name. The plugin will be automatically matched with the
      # Application according to the plugin's discovery rules.
      # ...
      env:
      - name: FOO
        value: bar
      # Plugin parameters are new in v2.5.
      parameters:
      - name: string-param
        string: example-string
      - name: array-param
        array: [item1, item2]
      - name: map-param
        map:
          param-name: param-value

  # Sources field specifies the list of sources for the application
  sources:
  - repoURL: https://github.com/argoproj/argocd-example-apps.git # Can point to either a Helm chart repo or a git repo.
    targetRevision: HEAD # For Helm, this refers to the chart version.
    path: guestbook # This has no meaning for Helm charts pulled directly from a Helm repo instead of git.
    ref: my-repo # For Helm, acts as a reference to this source for fetching values files from this source. Has no meaning when under `source` field

  # Destination cluster and namespace to deploy the application
  destination:
    # cluster API URL
    server: https://kubernetes.default.svc
    # or cluster name
    # name: in-cluster
    # The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace
    namespace: guestbook

  # ...

  syncPolicy:
    syncOptions:
    - CreateNamespace=true # Namespace Auto-Creation ensures that namespace specified as the application destination exists in the destination cluster.
    - PrunePropagationPolicy=foreground # Supported policies are background, foreground and orphan.
    - PruneLast=true # Allow the ability for resource pruning to happen as a final, implicit wave of a sync operation
    managedNamespaceMetadata: # Sets the metadata for the application namespace. Only valid if CreateNamespace=true (see above), otherwise it's a no-op.
      labels: # The labels to set on the application namespace
        any: label
        you: like
      annotations: # The annotations to set on the application namespace
        the: same
        applies: for
        annotations: on-the-namespace

    # The retry feature is available since v1.7
    retry:
      limit: 5 # number of failed sync attempt retries; unlimited number of attempts if less than 0
```
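
As a brief illustration of how the `ref` shown above can be used (a sketch with assumed chart and repository names), a second source can pull value files from the referenced repository via the `$<ref>` prefix:

```yaml
sources:
- repoURL: https://charts.example.com          # assumed Helm repository URL
  chart: some-chart                            # assumed chart name
  targetRevision: 1.2.3
  helm:
    valueFiles:
    - $my-repo/guestbook/values-prod.yaml      # resolved from the source declared with `ref: my-repo`
- repoURL: https://github.com/argoproj/argocd-example-apps.git
  targetRevision: HEAD
  ref: my-repo
```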